kinemotion 0.10.6__py3-none-any.whl → 0.67.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic. See the package's registry listing for more details.

Files changed (48)
  1. kinemotion/__init__.py +31 -6
  2. kinemotion/api.py +39 -598
  3. kinemotion/cli.py +2 -0
  4. kinemotion/cmj/__init__.py +5 -0
  5. kinemotion/cmj/analysis.py +621 -0
  6. kinemotion/cmj/api.py +563 -0
  7. kinemotion/cmj/cli.py +324 -0
  8. kinemotion/cmj/debug_overlay.py +457 -0
  9. kinemotion/cmj/joint_angles.py +307 -0
  10. kinemotion/cmj/kinematics.py +360 -0
  11. kinemotion/cmj/metrics_validator.py +767 -0
  12. kinemotion/cmj/validation_bounds.py +341 -0
  13. kinemotion/core/__init__.py +28 -0
  14. kinemotion/core/auto_tuning.py +71 -37
  15. kinemotion/core/cli_utils.py +60 -0
  16. kinemotion/core/debug_overlay_utils.py +385 -0
  17. kinemotion/core/determinism.py +83 -0
  18. kinemotion/core/experimental.py +103 -0
  19. kinemotion/core/filtering.py +9 -6
  20. kinemotion/core/formatting.py +75 -0
  21. kinemotion/core/metadata.py +231 -0
  22. kinemotion/core/model_downloader.py +172 -0
  23. kinemotion/core/pipeline_utils.py +433 -0
  24. kinemotion/core/pose.py +298 -141
  25. kinemotion/core/pose_landmarks.py +67 -0
  26. kinemotion/core/quality.py +393 -0
  27. kinemotion/core/smoothing.py +250 -154
  28. kinemotion/core/timing.py +247 -0
  29. kinemotion/core/types.py +42 -0
  30. kinemotion/core/validation.py +201 -0
  31. kinemotion/core/video_io.py +135 -50
  32. kinemotion/dropjump/__init__.py +1 -1
  33. kinemotion/dropjump/analysis.py +367 -182
  34. kinemotion/dropjump/api.py +665 -0
  35. kinemotion/dropjump/cli.py +156 -466
  36. kinemotion/dropjump/debug_overlay.py +136 -206
  37. kinemotion/dropjump/kinematics.py +232 -255
  38. kinemotion/dropjump/metrics_validator.py +240 -0
  39. kinemotion/dropjump/validation_bounds.py +157 -0
  40. kinemotion/models/__init__.py +0 -0
  41. kinemotion/models/pose_landmarker_lite.task +0 -0
  42. kinemotion-0.67.0.dist-info/METADATA +726 -0
  43. kinemotion-0.67.0.dist-info/RECORD +47 -0
  44. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/WHEEL +1 -1
  45. kinemotion-0.10.6.dist-info/METADATA +0 -561
  46. kinemotion-0.10.6.dist-info/RECORD +0 -20
  47. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/entry_points.txt +0 -0
  48. {kinemotion-0.10.6.dist-info → kinemotion-0.67.0.dist-info}/licenses/LICENSE +0 -0
@@ -2,10 +2,13 @@
2
2
 
3
3
  import json
4
4
  import subprocess
5
+ import warnings
5
6
 
6
7
  import cv2
7
8
  import numpy as np
8
9
 
10
+ from .timing import NULL_TIMER, Timer
11
+
9
12
 
10
13
  class VideoProcessor:
11
14
  """
@@ -15,14 +18,16 @@ class VideoProcessor:
15
18
  No dimensions are hardcoded - all dimensions are extracted from actual frame data.
16
19
  """
17
20
 
18
- def __init__(self, video_path: str):
21
+ def __init__(self, video_path: str, timer: Timer | None = None) -> None:
19
22
  """
20
23
  Initialize video processor.
21
24
 
22
25
  Args:
23
26
  video_path: Path to input video file
27
+ timer: Optional Timer for measuring operations
24
28
  """
25
29
  self.video_path = video_path
30
+ self.timer = timer or NULL_TIMER
26
31
  self.cap = cv2.VideoCapture(video_path)
27
32
 
28
33
  if not self.cap.isOpened():
@@ -30,6 +35,8 @@ class VideoProcessor:
30
35
 
31
36
  self.fps = self.cap.get(cv2.CAP_PROP_FPS)
32
37
  self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
38
+ self._frame_index: int = 0
39
+ self._current_timestamp_ms: int = 0 # Timestamp for the current frame
33
40
 
34
41
  # Read first frame to get actual dimensions
35
42
  # This is critical for preserving aspect ratio, especially with mobile videos
@@ -45,10 +52,14 @@ class VideoProcessor:
45
52
  self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
46
53
  self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
47
54
 
48
- # Extract rotation metadata from video (iPhones store rotation in side_data_list)
49
- # OpenCV ignores rotation metadata, so we need to extract and apply it manually
55
+ # Extract rotation metadata from video (iPhones store rotation in
56
+ # side_data_list). OpenCV ignores rotation metadata, so we need to
57
+ # extract and apply it manually
50
58
  self.rotation = 0 # Will be set by _extract_video_metadata()
51
59
 
60
+ # Extract codec information from video metadata
61
+ self.codec: str | None = None # Will be set by _extract_video_metadata()
62
+
52
63
  # Calculate display dimensions considering SAR (Sample Aspect Ratio)
53
64
  # Mobile videos often have non-square pixels encoded in SAR metadata
54
65
  # OpenCV doesn't directly expose SAR, but we need to handle display correctly
@@ -65,6 +76,62 @@ class VideoProcessor:
65
76
  self.display_width,
66
77
  )
67
78
 
79
+ @property
80
+ def current_timestamp_ms(self) -> int:
81
+ """Get the current frame timestamp in milliseconds.
82
+
83
+ Returns:
84
+ Timestamp in milliseconds for the frame most recently read.
85
+ For the first frame, this returns 0 ms.
86
+ """
87
+ return self._current_timestamp_ms
88
+
89
+ @property
90
+ def frame_index(self) -> int:
91
+ """Get the current frame index.
92
+
93
+ Returns:
94
+ Current frame number (0-based) - the frame most recently read
95
+ """
96
+ return self._frame_index
97
+
98
+ def _parse_sample_aspect_ratio(self, sar_str: str) -> None:
99
+ """
100
+ Parse SAR string and update display dimensions.
101
+
102
+ Args:
103
+ sar_str: SAR string in format "width:height" (e.g., "270:473")
104
+ """
105
+ if not sar_str or ":" not in sar_str:
106
+ return
107
+
108
+ sar_parts = sar_str.split(":")
109
+ sar_width = int(sar_parts[0])
110
+ sar_height = int(sar_parts[1])
111
+
112
+ # Calculate display dimensions if pixels are non-square
113
+ # DAR = (width * SAR_width) / (height * SAR_height)
114
+ if sar_width != sar_height:
115
+ self.display_width = int(self.width * sar_width / sar_height)
116
+ self.display_height = self.height
117
+
118
+ def _extract_rotation_from_stream(self, stream: dict) -> int: # type: ignore[type-arg]
119
+ """
120
+ Extract rotation metadata from video stream.
121
+
122
+ Args:
123
+ stream: ffprobe stream dictionary
124
+
125
+ Returns:
126
+ Rotation angle in degrees (0, 90, -90, 180)
127
+ """
128
+ side_data_list = stream.get("side_data_list", [])
129
+ for side_data in side_data_list:
130
+ if side_data.get("side_data_type") == "Display Matrix":
131
+ rotation = side_data.get("rotation", 0)
132
+ return int(rotation)
133
+ return 0
134
+
68
135
  def _extract_video_metadata(self) -> None:
69
136
  """
70
137
  Extract video metadata including SAR and rotation using ffprobe.
@@ -94,37 +161,36 @@ class VideoProcessor:
94
161
  timeout=5,
95
162
  )
96
163
 
97
- if result.returncode == 0:
98
- data = json.loads(result.stdout)
99
- if "streams" in data and len(data["streams"]) > 0:
100
- stream = data["streams"][0]
101
-
102
- # Extract SAR (Sample Aspect Ratio)
103
- sar_str = stream.get("sample_aspect_ratio", "1:1")
104
-
105
- # Parse SAR (e.g., "270:473")
106
- if sar_str and ":" in sar_str:
107
- sar_parts = sar_str.split(":")
108
- sar_width = int(sar_parts[0])
109
- sar_height = int(sar_parts[1])
110
-
111
- # Calculate display dimensions
112
- # DAR = (width * SAR_width) / (height * SAR_height)
113
- if sar_width != sar_height:
114
- self.display_width = int(
115
- self.width * sar_width / sar_height
116
- )
117
- self.display_height = self.height
118
-
119
- # Extract rotation from side_data_list (common for iPhone videos)
120
- side_data_list = stream.get("side_data_list", [])
121
- for side_data in side_data_list:
122
- if side_data.get("side_data_type") == "Display Matrix":
123
- rotation = side_data.get("rotation", 0)
124
- # Convert to int and normalize to 0, 90, -90, 180
125
- self.rotation = int(rotation)
126
- except (subprocess.TimeoutExpired, FileNotFoundError, json.JSONDecodeError):
127
- # If ffprobe fails, keep original dimensions (square pixels)
164
+ if result.returncode != 0:
165
+ return
166
+
167
+ data = json.loads(result.stdout)
168
+ if "streams" not in data or len(data["streams"]) == 0:
169
+ return
170
+
171
+ stream = data["streams"][0]
172
+
173
+ # Extract codec name (e.g., "h264", "hevc", "vp9")
174
+ self.codec = stream.get("codec_name")
175
+
176
+ # Extract and parse SAR (Sample Aspect Ratio)
177
+ sar_str = stream.get("sample_aspect_ratio", "1:1")
178
+ self._parse_sample_aspect_ratio(sar_str)
179
+
180
+ # Extract rotation from side_data_list (common for iPhone videos)
181
+ self.rotation = self._extract_rotation_from_stream(stream)
182
+
183
+ except FileNotFoundError:
184
+ # ffprobe not found - warn user about reduced functionality
185
+ warnings.warn(
186
+ "ffprobe not found. Video rotation and aspect ratio metadata will be "
187
+ "ignored. This may cause issues with mobile/rotated videos. "
188
+ "Install FFmpeg for full video support: https://ffmpeg.org/download.html",
189
+ UserWarning,
190
+ stacklevel=2,
191
+ )
192
+ except (subprocess.TimeoutExpired, json.JSONDecodeError):
193
+ # If ffprobe fails for other reasons, silently continue with defaults
128
194
  pass
129
195
 
130
196
  def read_frame(self) -> np.ndarray | None:
@@ -133,34 +199,53 @@ class VideoProcessor:
133
199
 
134
200
  OpenCV ignores rotation metadata, so we manually apply rotation
135
201
  based on the display matrix metadata extracted from the video.
202
+
203
+ Returns:
204
+ Frame as numpy array or None if no more frames
136
205
  """
137
- ret, frame = self.cap.read()
206
+ with self.timer.measure("frame_read"):
207
+ ret, frame = self.cap.read()
208
+
138
209
  if not ret:
139
210
  return None
140
211
 
141
- # Apply rotation if video has rotation metadata
142
- if self.rotation == -90 or self.rotation == 270:
143
- # -90 degrees = rotate 90 degrees clockwise
144
- frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
145
- elif self.rotation == 90 or self.rotation == -270:
146
- # 90 degrees = rotate 90 degrees counter-clockwise
147
- frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
148
- elif self.rotation == 180 or self.rotation == -180:
149
- # 180 degrees rotation
150
- frame = cv2.rotate(frame, cv2.ROTATE_180)
212
+ # Calculate timestamp for this frame BEFORE incrementing index
213
+ # This ensures frame 0 has timestamp 0ms, frame 1 has timestamp 16ms, etc.
214
+ if self.fps > 0:
215
+ self._current_timestamp_ms = int(self._frame_index * 1000 / self.fps)
151
216
 
217
+ # Apply rotation if video has rotation metadata
218
+ with self.timer.measure("frame_rotation"):
219
+ if self.rotation == -90 or self.rotation == 270:
220
+ # -90 degrees = rotate 90 degrees clockwise
221
+ frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
222
+ elif self.rotation == 90 or self.rotation == -270:
223
+ # 90 degrees = rotate 90 degrees counter-clockwise
224
+ frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
225
+ elif self.rotation == 180 or self.rotation == -180:
226
+ # 180 degrees rotation
227
+ frame = cv2.rotate(frame, cv2.ROTATE_180)
228
+
229
+ self._frame_index += 1
152
230
  return frame
153
231
 
154
- def reset(self) -> None:
155
- """Reset video to beginning."""
156
- self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
157
-
158
232
  def close(self) -> None:
159
233
  """Release video capture."""
160
234
  self.cap.release()
161
235
 
236
+ def __iter__(self) -> "VideoProcessor":
237
+ """Make the processor iterable."""
238
+ return self
239
+
240
+ def __next__(self) -> np.ndarray:
241
+ """Get the next frame during iteration."""
242
+ frame = self.read_frame()
243
+ if frame is None:
244
+ raise StopIteration
245
+ return frame
246
+
162
247
  def __enter__(self) -> "VideoProcessor":
163
248
  return self
164
249
 
165
- def __exit__(self, exc_type, exc_val, exc_tb) -> None: # type: ignore[no-untyped-def]
250
+ def __exit__(self, _exc_type, _exc_val, _exc_tb) -> None: # type: ignore[no-untyped-def]
166
251
  self.close()
@@ -1,12 +1,12 @@
1
1
  """Drop jump analysis module."""
2
2
 
3
+ from ..core.smoothing import interpolate_threshold_crossing
3
4
  from .analysis import (
4
5
  ContactState,
5
6
  calculate_adaptive_threshold,
6
7
  compute_average_foot_position,
7
8
  detect_ground_contact,
8
9
  find_interpolated_phase_transitions_with_curvature,
9
- interpolate_threshold_crossing,
10
10
  refine_transition_with_curvature,
11
11
  )
12
12
  from .debug_overlay import DebugOverlayRenderer