kinemotion 0.24.0__py3-none-any.whl → 0.25.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic. Click here for more details.

@@ -6,37 +6,15 @@ import json
6
6
  import sys
7
7
  from dataclasses import dataclass
8
8
  from pathlib import Path
9
- from typing import Any
10
9
 
11
10
  import click
12
- import numpy as np
13
11
 
14
12
  from ..api import (
15
13
  DropJumpVideoConfig,
16
14
  DropJumpVideoResult,
15
+ process_dropjump_video,
17
16
  process_dropjump_videos_bulk,
18
17
  )
19
- from ..core.auto_tuning import (
20
- QualityPreset,
21
- analyze_video_sample,
22
- auto_tune_parameters,
23
- )
24
- from ..core.cli_utils import (
25
- apply_expert_param_overrides,
26
- determine_initial_confidence,
27
- print_auto_tuned_params,
28
- smooth_landmark_sequence,
29
- track_all_frames,
30
- )
31
- from ..core.pose import PoseTracker
32
- from ..core.video_io import VideoProcessor
33
- from .analysis import (
34
- ContactState,
35
- detect_ground_contact,
36
- extract_foot_positions_and_visibilities,
37
- )
38
- from .debug_overlay import DebugOverlayRenderer
39
- from .kinematics import DropJumpMetrics, calculate_drop_jump_metrics
40
18
 
41
19
 
42
20
  @dataclass
@@ -250,81 +228,6 @@ def dropjump_analyze( # NOSONAR(S107) - Click CLI requires individual parameter
250
228
  )
251
229
 
252
230
 
253
- def _extract_positions_and_visibilities(
254
- smoothed_landmarks: list,
255
- ) -> tuple[np.ndarray, np.ndarray]:
256
- """Extract vertical positions and visibilities from landmarks.
257
-
258
- Args:
259
- smoothed_landmarks: Smoothed landmark sequence
260
-
261
- Returns:
262
- Tuple of (vertical_positions, visibilities)
263
- """
264
- click.echo("Extracting foot positions...", err=True)
265
- return extract_foot_positions_and_visibilities(smoothed_landmarks)
266
-
267
-
268
- def _create_debug_video(
269
- output: str,
270
- video: VideoProcessor,
271
- frames: list,
272
- smoothed_landmarks: list,
273
- contact_states: list[ContactState],
274
- metrics: DropJumpMetrics,
275
- ) -> None:
276
- """Generate debug video with overlays.
277
-
278
- Args:
279
- output: Output video path
280
- video: Video processor
281
- frames: Video frames
282
- smoothed_landmarks: Smoothed landmarks
283
- contact_states: Contact states
284
- metrics: Calculated metrics
285
- """
286
- click.echo(f"Generating debug video: {output}", err=True)
287
- if video.display_width != video.width or video.display_height != video.height:
288
- click.echo(f"Source video encoded: {video.width}x{video.height}", err=True)
289
- click.echo(
290
- f"Output dimensions: {video.display_width}x{video.display_height} "
291
- f"(respecting display aspect ratio)",
292
- err=True,
293
- )
294
- else:
295
- click.echo(
296
- f"Output dimensions: {video.width}x{video.height} "
297
- f"(matching source video aspect ratio)",
298
- err=True,
299
- )
300
-
301
- with DebugOverlayRenderer(
302
- output,
303
- video.width,
304
- video.height,
305
- video.display_width,
306
- video.display_height,
307
- video.fps,
308
- ) as renderer:
309
- render_bar: Any
310
- with click.progressbar(
311
- length=len(frames), label="Rendering frames"
312
- ) as render_bar:
313
- for i, frame in enumerate(frames):
314
- annotated = renderer.render_frame(
315
- frame,
316
- smoothed_landmarks[i],
317
- contact_states[i],
318
- i,
319
- metrics,
320
- use_com=False,
321
- )
322
- renderer.write_frame(annotated)
323
- render_bar.update(1)
324
-
325
- click.echo(f"Debug video saved: {output}", err=True)
326
-
327
-
328
231
  def _process_single(
329
232
  video_path: str,
330
233
  output: str | None,
@@ -333,96 +236,38 @@ def _process_single(
333
236
  verbose: bool,
334
237
  expert_params: AnalysisParameters,
335
238
  ) -> None:
336
- """Process a single video (original CLI behavior)."""
239
+ """Process a single video by calling the API."""
337
240
  click.echo(f"Analyzing video: {video_path}", err=True)
338
241
 
339
- quality_preset = QualityPreset(quality.lower())
340
-
341
242
  try:
342
- with VideoProcessor(video_path) as video:
343
- click.echo(
344
- f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
345
- f"{video.frame_count} frames",
346
- err=True,
347
- )
348
-
349
- # Determine confidence levels
350
- detection_conf, tracking_conf = determine_initial_confidence(
351
- quality_preset, expert_params
352
- )
353
-
354
- # Track all frames
355
- tracker = PoseTracker(
356
- min_detection_confidence=detection_conf,
357
- min_tracking_confidence=tracking_conf,
358
- )
359
- frames, landmarks_sequence = track_all_frames(video, tracker)
360
-
361
- if not landmarks_sequence:
362
- click.echo("Error: No frames processed", err=True)
363
- sys.exit(1)
364
-
365
- # Auto-tune parameters
366
- characteristics = analyze_video_sample(
367
- landmarks_sequence, video.fps, video.frame_count
368
- )
369
- params = auto_tune_parameters(characteristics, quality_preset)
370
- params = apply_expert_param_overrides(params, expert_params)
371
-
372
- # Show parameters if verbose
373
- if verbose:
374
- print_auto_tuned_params(video, quality_preset, params, characteristics)
375
-
376
- # Apply smoothing
377
- smoothed_landmarks = smooth_landmark_sequence(landmarks_sequence, params)
378
-
379
- # Extract positions
380
- vertical_positions, visibilities = _extract_positions_and_visibilities(
381
- smoothed_landmarks
382
- )
383
-
384
- # Detect ground contact
385
- contact_states = detect_ground_contact(
386
- vertical_positions,
387
- velocity_threshold=params.velocity_threshold,
388
- min_contact_frames=params.min_contact_frames,
389
- visibility_threshold=params.visibility_threshold,
390
- visibilities=visibilities,
391
- window_length=params.smoothing_window,
392
- polyorder=params.polyorder,
393
- )
394
-
395
- # Calculate metrics
396
- click.echo("Calculating metrics...", err=True)
397
- metrics = calculate_drop_jump_metrics(
398
- contact_states,
399
- vertical_positions,
400
- video.fps,
401
- drop_start_frame=expert_params.drop_start_frame,
402
- velocity_threshold=params.velocity_threshold,
403
- smoothing_window=params.smoothing_window,
404
- polyorder=params.polyorder,
405
- use_curvature=params.use_curvature,
406
- )
407
-
408
- # Output metrics
409
- metrics_json = json.dumps(metrics.to_dict(), indent=2)
410
- if json_output:
411
- Path(json_output).write_text(metrics_json)
412
- click.echo(f"Metrics written to: {json_output}", err=True)
413
- else:
414
- click.echo(metrics_json)
243
+ # Call the API function (handles all processing logic)
244
+ metrics = process_dropjump_video(
245
+ video_path=video_path,
246
+ quality=quality,
247
+ output_video=output,
248
+ json_output=json_output,
249
+ drop_start_frame=expert_params.drop_start_frame,
250
+ smoothing_window=expert_params.smoothing_window,
251
+ velocity_threshold=expert_params.velocity_threshold,
252
+ min_contact_frames=expert_params.min_contact_frames,
253
+ visibility_threshold=expert_params.visibility_threshold,
254
+ detection_confidence=expert_params.detection_confidence,
255
+ tracking_confidence=expert_params.tracking_confidence,
256
+ verbose=verbose,
257
+ )
415
258
 
416
- # Generate debug video if requested
417
- if output:
418
- _create_debug_video(
419
- output, video, frames, smoothed_landmarks, contact_states, metrics
420
- )
259
+ # Print formatted summary to stdout if no JSON output specified
260
+ if not json_output:
261
+ click.echo(json.dumps(metrics.to_dict(), indent=2))
421
262
 
422
- click.echo("Analysis complete!", err=True)
263
+ click.echo("Analysis complete!", err=True)
423
264
 
424
265
  except Exception as e:
425
266
  click.echo(f"Error: {str(e)}", err=True)
267
+ if verbose:
268
+ import traceback
269
+
270
+ traceback.print_exc()
426
271
  sys.exit(1)
427
272
 
428
273
 
@@ -1,6 +1,6 @@
1
1
  """Kinematic calculations for drop-jump metrics."""
2
2
 
3
- from typing import TypedDict
3
+ from typing import TYPE_CHECKING, TypedDict
4
4
 
5
5
  import numpy as np
6
6
  from numpy.typing import NDArray
@@ -14,9 +14,13 @@ from .analysis import (
14
14
  find_landing_from_acceleration,
15
15
  )
16
16
 
17
+ if TYPE_CHECKING:
18
+ from ..core.metadata import ResultMetadata
19
+ from ..core.quality import QualityAssessment
17
20
 
18
- class DropJumpMetricsDict(TypedDict, total=False):
19
- """Type-safe dictionary for drop jump metrics JSON output."""
21
+
22
+ class DropJumpDataDict(TypedDict, total=False):
23
+ """Type-safe dictionary for drop jump measurement data."""
20
24
 
21
25
  ground_contact_time_ms: float | None
22
26
  flight_time_ms: float | None
@@ -34,6 +38,13 @@ class DropJumpMetricsDict(TypedDict, total=False):
34
38
  flight_end_frame_precise: float | None
35
39
 
36
40
 
41
+ class DropJumpResultDict(TypedDict):
42
+ """Type-safe dictionary for complete drop jump result with data and metadata."""
43
+
44
+ data: DropJumpDataDict
45
+ metadata: dict # ResultMetadata.to_dict()
46
+
47
+
37
48
  class DropJumpMetrics:
38
49
  """Container for drop-jump analysis metrics."""
39
50
 
@@ -53,10 +64,18 @@ class DropJumpMetrics:
53
64
  self.contact_end_frame_precise: float | None = None
54
65
  self.flight_start_frame_precise: float | None = None
55
66
  self.flight_end_frame_precise: float | None = None
56
-
57
- def to_dict(self) -> DropJumpMetricsDict:
58
- """Convert metrics to dictionary for JSON output."""
59
- return {
67
+ # Quality assessment
68
+ self.quality_assessment: QualityAssessment | None = None
69
+ # Complete metadata
70
+ self.result_metadata: ResultMetadata | None = None
71
+
72
+ def to_dict(self) -> DropJumpResultDict:
73
+ """Convert metrics to JSON-serializable dictionary with data/metadata structure.
74
+
75
+ Returns:
76
+ Dictionary with nested data and metadata structure.
77
+ """
78
+ data: DropJumpDataDict = {
60
79
  "ground_contact_time_ms": (
61
80
  round(self.ground_contact_time * 1000, 2)
62
81
  if self.ground_contact_time is not None
@@ -127,6 +146,18 @@ class DropJumpMetrics:
127
146
  ),
128
147
  }
129
148
 
149
+ # Build metadata from ResultMetadata if available, otherwise use legacy quality
150
+ if self.result_metadata is not None:
151
+ metadata = self.result_metadata.to_dict()
152
+ elif self.quality_assessment is not None:
153
+ # Fallback for backwards compatibility during transition
154
+ metadata = {"quality": self.quality_assessment.to_dict()}
155
+ else:
156
+ # No metadata available
157
+ metadata = {}
158
+
159
+ return {"data": data, "metadata": metadata}
160
+
130
161
 
131
162
  def _determine_drop_start_frame(
132
163
  drop_start_frame: int | None,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kinemotion
3
- Version: 0.24.0
3
+ Version: 0.25.0
4
4
  Summary: Video-based kinematic analysis for athletic performance
5
5
  Project-URL: Homepage, https://github.com/feniix/kinemotion
6
6
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -79,17 +79,42 @@ Description-Content-Type: text/markdown
79
79
  - **Metrics**: Jump height, flight time, countermovement depth, eccentric/concentric durations
80
80
  - **Validated accuracy**: 50.6cm jump (±1 frame precision)
81
81
 
82
- ## Validation Status
82
+ ## ⚠️ Validation Status
83
83
 
84
- ⚠️ **IMPORTANT**: This tool's accuracy has **not been validated** against gold standard measurements (force plates, 3D motion capture). All accuracy claims and improvement estimates are theoretical and based on algorithmic considerations, not empirical testing.
84
+ **Current Status:** Pre-validation (not validated against force plates or motion capture systems)
85
85
 
86
- The tool provides consistent measurements and may be useful for:
86
+ ### What This Tool IS Suitable For
87
87
 
88
- - Tracking relative changes in an individual athlete over time
89
- - Comparing similar jumps under controlled conditions
90
- - Exploratory analysis and research
88
+ ✅ **Training monitoring** - Track relative changes within the same athlete over time
89
+ ✅ **Educational purposes** - Learn about jump biomechanics and video analysis
90
+ ✅ **Exploratory analysis** - Initial investigation before formal testing
91
+ ✅ **Proof-of-concept research** - Demonstrate feasibility of video-based methods
91
92
 
92
- For clinical, research, or performance assessment requiring validated accuracy, this tool should be compared against validated measurement systems before use.
93
+ ### What This Tool IS NOT Suitable For
94
+
95
+ ❌ **Research publications** - As a validated measurement instrument
96
+ ❌ **Clinical decision-making** - Injury assessment, return-to-play decisions
97
+ ❌ **Talent identification** - Absolute performance comparisons between athletes
98
+ ❌ **Legal/insurance assessments** - Any context requiring validated measurements
99
+ ❌ **High-stakes testing** - Draft combines, professional athlete evaluation
100
+
101
+ ### Known Limitations
102
+
103
+ - **No force plate validation** - Accuracy claims are theoretical, not empirical
104
+ - **MediaPipe constraints** - Accuracy affected by lighting, clothing, occlusion, camera quality
105
+ - **Lower sampling rate** - Typical video (30-60fps) vs validated apps (120-240Hz)
106
+ - **Indirect measurement** - Landmarks → CoM estimation introduces potential error
107
+ - **No correction factors** - Unlike validated tools (e.g., MyJump), no systematic bias corrections applied
108
+
109
+ ### Recommended Use
110
+
111
+ If you need validated measurements for research or clinical use, consider:
112
+
113
+ - **Commercial validated apps**: MyJump 2, MyJumpLab (smartphone-based, force plate validated)
114
+ - **Laboratory equipment**: Force plates, optical motion capture systems
115
+ - **Validation testing**: Compare kinemotion against validated equipment in your specific use case
116
+
117
+ For detailed validation status and roadmap, see [`docs/validation-status.md`](docs/validation-status.md).
93
118
 
94
119
  ## Setup
95
120
 
@@ -242,6 +267,55 @@ kinemotion cmj-analyze videos/*.mp4 --batch --workers 4 \
242
267
  --csv-summary summary.csv
243
268
  ```
244
269
 
270
+ ### Quality Indicators & Confidence Scores
271
+
272
+ All analysis outputs include automatic quality assessment to help you know when to trust results:
273
+
274
+ ```json
275
+ {
276
+ "jump_height_m": 0.352,
277
+ "flight_time_s": 0.534,
278
+ "confidence": "high",
279
+ "quality_score": 87.3,
280
+ "quality_indicators": {
281
+ "avg_visibility": 0.89,
282
+ "min_visibility": 0.82,
283
+ "tracking_stable": true,
284
+ "phase_detection_clear": true,
285
+ "outliers_detected": 2,
286
+ "outlier_percentage": 1.5,
287
+ "position_variance": 0.0008,
288
+ "fps": 60.0
289
+ },
290
+ "warnings": []
291
+ }
292
+ ```
293
+
294
+ **Confidence Levels:**
295
+
296
+ - **High** (score ≥75): Trust these results, good tracking quality
297
+ - **Medium** (score 50-74): Use with caution, check quality indicators
298
+ - **Low** (score \<50): Results may be unreliable, review warnings
299
+
300
+ **Common Warnings:**
301
+
302
+ - Poor lighting or occlusion detected
303
+ - Unstable landmark tracking (jitter)
304
+ - High outlier rate (tracking glitches)
305
+ - Low frame rate (\<30fps)
306
+ - Unclear phase transitions
307
+
308
+ **Filtering by Quality:**
309
+
310
+ ```python
311
+ # Only use high-confidence results
312
+ metrics = process_cmj_video("video.mp4")
313
+ if metrics.quality_assessment.confidence == "high":
314
+ print(f"Reliable jump height: {metrics.jump_height:.3f}m")
315
+ else:
316
+ print(f"Low quality - warnings: {metrics.quality_assessment.warnings}")
317
+ ```
318
+
245
319
  ## Python API
246
320
 
247
321
  Use kinemotion as a library for automated pipelines and custom analysis.
@@ -1,28 +1,30 @@
1
1
  kinemotion/__init__.py,sha256=vAEIg-oDX1ZkQMnWgXd__tekaA5KUcEvdJSAGWS8VUY,722
2
- kinemotion/api.py,sha256=T9oqDxelyrVPhWifxUV8BVm8lu9sTREBLkEbT9fr678,31360
2
+ kinemotion/api.py,sha256=3oLJEjtHweG85t_BG1nCWnZ-8yl3tGW_6ZoBAILMfJw,38006
3
3
  kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
4
4
  kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
5
5
  kinemotion/cmj/analysis.py,sha256=4HYGn4VDIB6oExAees-VcPfpNgWOltpgwjyNTU7YAb4,18263
6
- kinemotion/cmj/cli.py,sha256=bmDvNvL7cu65-R8YkRIZYKD0nuTA0IJnWLcLlH_kFm0,16843
6
+ kinemotion/cmj/cli.py,sha256=12FEfWrseG4kCUbgHHdBPkWp6zzVQ0VAzfgNJotArmM,10792
7
7
  kinemotion/cmj/debug_overlay.py,sha256=D-y2FQKI01KY0WXFKTKg6p9Qj3AkXCE7xjau3Ais080,15886
8
8
  kinemotion/cmj/joint_angles.py,sha256=8ucpDGPvbt4iX3tx9eVxJEUv0laTm2Y58_--VzJCogE,9113
9
- kinemotion/cmj/kinematics.py,sha256=bCtAQY2DIX2JMMou1Z8_Wil3a0sJhpw19pl1CsPKnBg,8202
10
- kinemotion/core/__init__.py,sha256=3yzDhb5PekDNjydqrs8aWGneUGJBt-lB0SoB_Y2FXqU,1010
9
+ kinemotion/cmj/kinematics.py,sha256=4-YDbCq9e7JlyGl_R3W1tvo8iAkXhjNla9J5yevUSRk,9165
10
+ kinemotion/core/__init__.py,sha256=HsqolRa60cW3vrG8F9Lvr9WvWcs5hCmsTzSgo7imi-4,1278
11
11
  kinemotion/core/auto_tuning.py,sha256=j6cul_qC6k0XyryCG93C1AWH2MKPj3UBMzuX02xaqfI,11235
12
12
  kinemotion/core/cli_utils.py,sha256=Pq1JF7yvK1YbH0tOUWKjplthCbWsJQt4Lv7esPYH4FM,7254
13
13
  kinemotion/core/debug_overlay_utils.py,sha256=TyUb5okv5qw8oeaX3jsUO_kpwf1NnaHEAOTm-8LwTno,4587
14
14
  kinemotion/core/filtering.py,sha256=f-m-aA59e4WqE6u-9MA51wssu7rI-Y_7n1cG8IWdeRQ,11241
15
+ kinemotion/core/metadata.py,sha256=PyGHL6sx7Hj21lyorg2VsWP9BGTj_y_-wWU6eKCEfJo,6817
15
16
  kinemotion/core/pose.py,sha256=ztemdZ_ysVVK3gbXabm8qS_dr1VfJX9KZjmcO-Z-iNE,8532
17
+ kinemotion/core/quality.py,sha256=OC9nuf5IrQ9xURf3eA50VoNWOqkGwbjJpS90q2FDQzA,13082
16
18
  kinemotion/core/smoothing.py,sha256=x4o3BnG6k8OaV3emgpoJDF84CE9k5RYR7BeSYH_-8Es,14092
17
19
  kinemotion/core/video_io.py,sha256=0bJTheYidEqxGP5Y2dSO2x6sbOrnBDBu2TEiV8gT23A,7285
18
20
  kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
19
21
  kinemotion/dropjump/analysis.py,sha256=1AsIsgWg5wuwJo7poFK7aMCFr93yHVms-fEvaOGQQWs,27448
20
- kinemotion/dropjump/cli.py,sha256=J2F8ij-UcybY7YjK_bncQZiHNzrgS3Y7uTBkNo7y_L4,21328
22
+ kinemotion/dropjump/cli.py,sha256=ZyroaYPwz8TgfL39Wcaj6m68Awl6lYXC75ttaflU-c0,16236
21
23
  kinemotion/dropjump/debug_overlay.py,sha256=LkPw6ucb7beoYWS4L-Lvjs1KLCm5wAWDAfiznUeV2IQ,5668
22
- kinemotion/dropjump/kinematics.py,sha256=VZWdytkw58Vk9dsNe8U15sFB84kfZKLo4argvt0CTPM,16361
24
+ kinemotion/dropjump/kinematics.py,sha256=PaVakc8eiYR6ZErp2jO3A8Ey-rNIso0rGLft6-yOEzs,17510
23
25
  kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
24
- kinemotion-0.24.0.dist-info/METADATA,sha256=q-Zj983PFHne1aGTqC7JU90XxUC6g0c_SFT1p1HPGXQ,20762
25
- kinemotion-0.24.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
26
- kinemotion-0.24.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
27
- kinemotion-0.24.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
28
- kinemotion-0.24.0.dist-info/RECORD,,
26
+ kinemotion-0.25.0.dist-info/METADATA,sha256=lJ39uLFmaTzqvZXJNw8eWBwwvWIeM2VQDd1D2cDcBUw,23244
27
+ kinemotion-0.25.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
28
+ kinemotion-0.25.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
29
+ kinemotion-0.25.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
30
+ kinemotion-0.25.0.dist-info/RECORD,,