kinemotion-0.40.0-py3-none-any.whl → kinemotion-0.41.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kinemotion has been flagged as possibly problematic; see the registry page for details.

@@ -22,6 +22,7 @@ from .smoothing import (
     smooth_landmarks,
     smooth_landmarks_advanced,
 )
+from .timing import PerformanceTimer
 from .video_io import VideoProcessor
 
 __all__ = [
@@ -46,6 +47,8 @@ __all__ = [
     "QualityIndicators",
     "assess_jump_quality",
     "calculate_position_stability",
+    # Timing
+    "PerformanceTimer",
     # Video I/O
     "VideoProcessor",
 ]
@@ -121,6 +121,8 @@ class BaseDebugOverlayRenderer:
         self.height = height
         self.display_width = display_width
         self.display_height = display_height
+        # Duration of ffmpeg re-encoding (0.0 if not needed)
+        self.reencode_duration_s = 0.0
         self.writer, self.needs_resize, self.used_codec = create_video_writer(
             output_path, width, height, display_width, display_height, fps
         )
@@ -201,8 +203,8 @@ class BaseDebugOverlayRenderer:
             stdout=subprocess.DEVNULL,
             stderr=subprocess.PIPE,
         )
-        reencode_duration = time.time() - reencode_start
-        print(f"Debug video re-encoded in {reencode_duration:.2f}s")
+        self.reencode_duration_s = time.time() - reencode_start
+        print(f"Debug video re-encoded in {self.reencode_duration_s:.2f}s")
 
         # Overwrite original file
         os.replace(temp_path, self.output_path)
@@ -49,21 +49,29 @@ class ProcessingInfo:
         timestamp: ISO 8601 timestamp of when analysis was performed
         quality_preset: Quality preset used ("fast", "balanced", "accurate")
         processing_time_s: Time taken to process video in seconds
+        timing_breakdown: Optional dict mapping stage names to duration in seconds
     """
 
     version: str
     timestamp: str
     quality_preset: str
     processing_time_s: float
+    timing_breakdown: dict[str, float] | None = None
 
     def to_dict(self) -> dict:
         """Convert to JSON-serializable dictionary."""
-        return {
+        result: dict = {
             "version": self.version,
             "timestamp": self.timestamp,
             "quality_preset": self.quality_preset,
             "processing_time_s": round(self.processing_time_s, 3),
         }
+        if self.timing_breakdown:
+            result["timing_breakdown_ms"] = {
+                stage: round(duration * 1000, 1)
+                for stage, duration in self.timing_breakdown.items()
+            }
+        return result
 
 
 @dataclass
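
A quick sketch of how the new timing_breakdown field serializes. The values below are made up for illustration; the stage names match the measure() calls added in pose.py and video_io.py further down in this diff.

    from kinemotion.core.metadata import ProcessingInfo

    info = ProcessingInfo(
        version="0.41.1",
        timestamp="2025-01-01T00:00:00Z",
        quality_preset="balanced",
        processing_time_s=12.345,
        timing_breakdown={"mediapipe_inference": 9.8765, "frame_read": 1.234},
    )
    print(info.to_dict())
    # {
    #     "version": "0.41.1",
    #     "timestamp": "2025-01-01T00:00:00Z",
    #     "quality_preset": "balanced",
    #     "processing_time_s": 12.345,
    #     "timing_breakdown_ms": {"mediapipe_inference": 9876.5, "frame_read": 1234.0},
    # }
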
kinemotion/core/pose.py CHANGED
@@ -4,6 +4,8 @@ import cv2
 import mediapipe as mp
 import numpy as np
 
+from .timing import PerformanceTimer
+
 
 class PoseTracker:
     """Tracks human pose landmarks in video frames using MediaPipe."""
@@ -12,14 +14,17 @@ class PoseTracker:
         self,
         min_detection_confidence: float = 0.5,
         min_tracking_confidence: float = 0.5,
-    ):
+        timer: PerformanceTimer | None = None,
+    ) -> None:
         """
         Initialize the pose tracker.
 
         Args:
             min_detection_confidence: Minimum confidence for pose detection
             min_tracking_confidence: Minimum confidence for pose tracking
+            timer: Optional PerformanceTimer for measuring operations
         """
+        self.timer = timer
         self.mp_pose = mp.solutions.pose
         self.pose = self.mp_pose.Pose(
             static_image_mode=False,  # Use tracking mode for better performance
@@ -45,7 +50,11 @@ class PoseTracker:
         rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
 
         # Process the frame
-        results = self.pose.process(rgb_frame)
+        if self.timer:
+            with self.timer.measure("mediapipe_inference"):
+                results = self.pose.process(rgb_frame)
+        else:
+            results = self.pose.process(rgb_frame)
 
         if not results.pose_landmarks:
             return None
@@ -0,0 +1,49 @@
+"""Timing utilities for performance profiling."""
+
+import time
+from collections.abc import Generator
+from contextlib import contextmanager
+
+
+class PerformanceTimer:
+    """Simple timer for tracking execution duration of named steps.
+
+    Uses context manager pattern for clean, testable timing instrumentation.
+    Accumulates timing data in metrics dictionary accessible via get_metrics().
+    """
+
+    def __init__(self) -> None:
+        """Initialize timer with empty metrics dictionary."""
+        self.metrics: dict[str, float] = {}
+
+    @contextmanager
+    def measure(self, name: str) -> Generator[None, None, None]:
+        """Context manager to measure execution time of a block.
+
+        Args:
+            name: Name of the step being measured (e.g., "pose_tracking")
+
+        Yields:
+            None
+
+        Example:
+            timer = PerformanceTimer()
+            with timer.measure("video_initialization"):
+                # code to measure
+                pass
+            metrics = timer.get_metrics()  # {"video_initialization": 0.123}
+        """
+        start_time = time.time()
+        try:
+            yield
+        finally:
+            duration = time.time() - start_time
+            self.metrics[name] = duration
+
+    def get_metrics(self) -> dict[str, float]:
+        """Get collected timing metrics in seconds.
+
+        Returns:
+            A copy of the metrics dictionary to prevent external modification.
+        """
+        return self.metrics.copy()
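
The new file above is kinemotion/core/timing.py. A minimal usage sketch follows, assuming the caller wires the timer itself; the changes to kinemotion/api.py are not shown in this diff, so the wiring below is illustrative only, and "drop_jump.mp4" is a placeholder path.

    from kinemotion.core.pose import PoseTracker
    from kinemotion.core.timing import PerformanceTimer
    from kinemotion.core.video_io import VideoProcessor

    timer = PerformanceTimer()

    # Time a named block directly with the context manager.
    with timer.measure("video_initialization"):
        video = VideoProcessor("drop_jump.mp4", timer=timer)

    tracker = PoseTracker(timer=timer)

    # With a timer attached, VideoProcessor records "frame_read" and PoseTracker
    # records "mediapipe_inference" around their respective calls. Each measure()
    # stores the most recent duration under its name rather than summing repeats.
    metrics = timer.get_metrics()  # e.g. {"video_initialization": 0.12, ...}

    # This seconds-valued dict has the shape ProcessingInfo.timing_breakdown expects;
    # to_dict() then reports it in milliseconds under "timing_breakdown_ms".
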
@@ -7,6 +7,8 @@ import warnings
 import cv2
 import numpy as np
 
+from .timing import PerformanceTimer
+
 
 class VideoProcessor:
     """
@@ -16,14 +18,16 @@ class VideoProcessor:
     No dimensions are hardcoded - all dimensions are extracted from actual frame data.
     """
 
-    def __init__(self, video_path: str):
+    def __init__(self, video_path: str, timer: PerformanceTimer | None = None) -> None:
         """
         Initialize video processor.
 
         Args:
             video_path: Path to input video file
+            timer: Optional PerformanceTimer for measuring operations
         """
         self.video_path = video_path
+        self.timer = timer
         self.cap = cv2.VideoCapture(video_path)
 
         if not self.cap.isOpened():
@@ -175,7 +179,12 @@ class VideoProcessor:
         OpenCV ignores rotation metadata, so we manually apply rotation
         based on the display matrix metadata extracted from the video.
         """
-        ret, frame = self.cap.read()
+        if self.timer:
+            with self.timer.measure("frame_read"):
+                ret, frame = self.cap.read()
+        else:
+            ret, frame = self.cap.read()
+
         if not ret:
             return None
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.40.0
+Version: 0.41.1
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -1,5 +1,5 @@
 kinemotion/__init__.py,sha256=wPItmyGJUOFM6GPRVhAEvRz0-ErI7e2qiUREYJ9EfPQ,943
-kinemotion/api.py,sha256=1MsBvkznlP_OaBZ37a0hG9uMTESv2iJMHR01tnPv5sA,41046
+kinemotion/api.py,sha256=B_orKAJ5KNsL5zse5B0s4pumT_4OcAVoxdSEa3N9qMY,52843
 kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
 kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
 kinemotion/cmj/analysis.py,sha256=qtULzp9uYzm5M0_Qu5YGJpuwjg9fz1VKAg6xg4NJxvM,21639
@@ -9,20 +9,21 @@ kinemotion/cmj/joint_angles.py,sha256=HmheIEiKcQz39cRezk4h-htorOhGNPsqKIR9RsAEKt
 kinemotion/cmj/kinematics.py,sha256=Lq9m9MNQxnXv31VhKmXVrlM7rRkhi8PxW50N_CC8_8Y,11860
 kinemotion/cmj/metrics_validator.py,sha256=V_fmlczYH06SBtwqESv-IfGi3wDsIy3RQbd7VwOyNo0,31359
 kinemotion/cmj/validation_bounds.py,sha256=9ZTo68fl3ooyWjXXyTMRLpK9tFANa_rQf3oHhq7iQGE,11995
-kinemotion/core/__init__.py,sha256=HsqolRa60cW3vrG8F9Lvr9WvWcs5hCmsTzSgo7imi-4,1278
+kinemotion/core/__init__.py,sha256=GTLnE_gGIk7HC51epWUXVuNxcvS5lf7UL6qeWRlgMV0,1352
 kinemotion/core/auto_tuning.py,sha256=wtCUMOhBChVJNXfEeku3GCMW4qED6MF-O_mv2sPTiVQ,11324
 kinemotion/core/cli_utils.py,sha256=zbnifPhD-OYofJioeYfJtshuWcl8OAEWtqCGVF4ctAI,7966
-kinemotion/core/debug_overlay_utils.py,sha256=-HRHw5Ew7hVH0MWwdutZV1JwPqQwzQ6Jex7xHWc3q88,7511
+kinemotion/core/debug_overlay_utils.py,sha256=sUx_A14iZkJJjmUVeqvD_g2zAO9PwlzF3xrP4FLuiTc,7625
 kinemotion/core/determinism.py,sha256=NwVrHqJiVxxFHTBPVy8aDBJH2SLIcYIpdGFp7glblB8,2515
 kinemotion/core/experimental.py,sha256=IK05AF4aZS15ke85hF3TWCqRIXU1AlD_XKzFz735Ua8,3640
 kinemotion/core/filtering.py,sha256=GsC9BB71V07LJJHgS2lsaxUAtJsupcUiwtZFDgODh8c,11417
 kinemotion/core/formatting.py,sha256=G_3eqgOtym9RFOZVEwCxye4A2cyrmgvtQ214vIshowU,2480
-kinemotion/core/metadata.py,sha256=iz9YdkesHo-85TVBCoQVn7zkbrSde_fqjU79s_b-TZk,6829
-kinemotion/core/pose.py,sha256=MQa7ebbuvk_vxJzVlwARKvEaJOqSFJMRRap2dz0O__0,8613
+kinemotion/core/metadata.py,sha256=bJAVa4nym__zx1hNowSZduMGKBSGOPxTbBQkjm6N0D0,7207
+kinemotion/core/pose.py,sha256=Tq4VS0YmMzrprVUsELm6FQczyLhP8UKurM9ccYn1LLU,8959
 kinemotion/core/quality.py,sha256=dPGQp08y8DdEUbUdjTThnUOUsALgF0D2sdz50cm6wLI,13098
 kinemotion/core/smoothing.py,sha256=GAfC-jxu1eqNyDjsUXqUBicKx9um5hrk49wz1FxfRNM,15219
+kinemotion/core/timing.py,sha256=bdRg1g7J0-eWB3oj7tEF5Ucp_tiad1IxsM14edAZQu4,1484
 kinemotion/core/validation.py,sha256=LmKfSl4Ayw3DgwKD9IrhsPdzp5ia4drLsHA2UuU1SCM,6310
-kinemotion/core/video_io.py,sha256=fDdyYVIKqUSgCjBJa8l_S0SrDPDAhrWYfsDBNRuz1oM,7549
+kinemotion/core/video_io.py,sha256=Nn5xIt4iZCQxaP9g0cpWFeNsz5AgUlWbZ_B8ZCfd4lw,7869
 kinemotion/dropjump/__init__.py,sha256=tC3H3BrCg8Oj-db-Vrtx4PH_llR1Ppkd5jwaOjhQcLg,862
 kinemotion/dropjump/analysis.py,sha256=MjxO-vps0nz_hXlnGk7cgq3jFenJYzsM0VVpHwnHXsM,27935
 kinemotion/dropjump/cli.py,sha256=n_Wfv3AC6YIgRPYhO3F2nTSai0NR7fh95nAoWjryQeY,16250
@@ -31,8 +32,8 @@ kinemotion/dropjump/kinematics.py,sha256=kH-XM66wlOCYMpjvyb6_Qh5ZebyOfFZ47rmhgE1
 kinemotion/dropjump/metrics_validator.py,sha256=CrTlGup8q2kyPXtA6HNwm7_yq0AsBaDllG7RVZdXmYA,9342
 kinemotion/dropjump/validation_bounds.py,sha256=5b4I3CKPybuvrbn-nP5yCcGF_sH4Vtyw3a5AWWvWnBk,4645
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.40.0.dist-info/METADATA,sha256=0X_uEEzEi-DH9d4HwfseQVEzlDmbJRw96Dn1o-m5DbI,26020
-kinemotion-0.40.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-kinemotion-0.40.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.40.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.40.0.dist-info/RECORD,,
+kinemotion-0.41.1.dist-info/METADATA,sha256=0hR96r4xCgvlzmGt2CEiwwxZ1k7sTEVLLAHLSZa99OY,26020
+kinemotion-0.41.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+kinemotion-0.41.1.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.41.1.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.41.1.dist-info/RECORD,,