kinemotion 0.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kinemotion might be problematic. Click here for more details.
- kinemotion/__init__.py +14 -0
- kinemotion/api.py +428 -0
- kinemotion/cli.py +20 -0
- kinemotion/core/__init__.py +40 -0
- kinemotion/core/auto_tuning.py +289 -0
- kinemotion/core/filtering.py +345 -0
- kinemotion/core/pose.py +220 -0
- kinemotion/core/smoothing.py +366 -0
- kinemotion/core/video_io.py +166 -0
- kinemotion/dropjump/__init__.py +29 -0
- kinemotion/dropjump/analysis.py +639 -0
- kinemotion/dropjump/cli.py +738 -0
- kinemotion/dropjump/debug_overlay.py +252 -0
- kinemotion/dropjump/kinematics.py +439 -0
- kinemotion/py.typed +0 -0
- kinemotion-0.10.2.dist-info/METADATA +561 -0
- kinemotion-0.10.2.dist-info/RECORD +20 -0
- kinemotion-0.10.2.dist-info/WHEEL +4 -0
- kinemotion-0.10.2.dist-info/entry_points.txt +2 -0
- kinemotion-0.10.2.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
"""Generic video I/O functionality for all jump analysis types."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import subprocess
|
|
5
|
+
|
|
6
|
+
import cv2
|
|
7
|
+
import numpy as np
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class VideoProcessor:
    """
    Handles video reading and processing.

    IMPORTANT: This class preserves the exact aspect ratio of the source video.
    No dimensions are hardcoded - all dimensions are extracted from actual frame data.
    """

    def __init__(self, video_path: str):
        """
        Initialize video processor.

        Args:
            video_path: Path to input video file

        Raises:
            ValueError: If the video cannot be opened by OpenCV.
        """
        self.video_path = video_path
        self.cap = cv2.VideoCapture(video_path)

        if not self.cap.isOpened():
            raise ValueError(f"Could not open video: {video_path}")

        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

        # Read first frame to get actual dimensions.
        # This is critical for preserving aspect ratio, especially with mobile
        # videos that have rotation metadata. OpenCV properties
        # (CAP_PROP_FRAME_WIDTH/HEIGHT) may return incorrect dimensions, so we
        # read the actual frame data.
        ret, first_frame = self.cap.read()
        if ret:
            # frame.shape is (height, width, channels) - extract actual dimensions
            self.height, self.width = first_frame.shape[:2]
            self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  # Reset to beginning
        else:
            # Fallback to video properties if we can't read a frame
            self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Extract rotation metadata from the video (iPhones store rotation in
        # side_data_list). OpenCV ignores rotation metadata, so we need to
        # extract and apply it manually.
        self.rotation = 0  # Will be set by _extract_video_metadata()

        # Calculate display dimensions considering SAR (Sample Aspect Ratio).
        # Mobile videos often have non-square pixels encoded in SAR metadata.
        # OpenCV doesn't directly expose SAR, but we need to handle display correctly.
        self.display_width = self.width
        self.display_height = self.height
        self._extract_video_metadata()

        # Apply rotation to the stored dimensions if needed.
        # Fix: -270 is included here because read_frame() rotates frames for
        # rotation == -270; previously the dimensions were not swapped for
        # that case, leaving width/height inconsistent with the frame data.
        if self.rotation in (90, -90, 270, -270):
            # Swap dimensions for quarter-turn rotations
            self.width, self.height = self.height, self.width
            self.display_width, self.display_height = (
                self.display_height,
                self.display_width,
            )

    def _extract_video_metadata(self) -> None:
        """
        Extract video metadata including SAR and rotation using ffprobe.

        Many mobile videos (especially from iPhones) have:
        - Non-square pixels (SAR != 1:1) affecting display dimensions
        - Rotation metadata in side_data_list that OpenCV ignores

        We extract both to ensure proper display and pose detection.
        Best-effort: if ffprobe is missing, times out, or produces unparsable
        output, the defaults (square pixels, no rotation) are kept.
        """
        try:
            # Use ffprobe to get SAR metadata
            result = subprocess.run(
                [
                    "ffprobe",
                    "-v",
                    "quiet",
                    "-print_format",
                    "json",
                    "-show_streams",
                    "-select_streams",
                    "v:0",
                    self.video_path,
                ],
                capture_output=True,
                text=True,
                timeout=5,
            )

            if result.returncode == 0:
                data = json.loads(result.stdout)
                if "streams" in data and len(data["streams"]) > 0:
                    stream = data["streams"][0]

                    # Extract SAR (Sample Aspect Ratio)
                    sar_str = stream.get("sample_aspect_ratio", "1:1")

                    # Parse SAR (e.g., "270:473")
                    if sar_str and ":" in sar_str:
                        sar_parts = sar_str.split(":")
                        sar_width = int(sar_parts[0])
                        sar_height = int(sar_parts[1])

                        # Calculate display dimensions
                        # DAR = (width * SAR_width) / (height * SAR_height)
                        # Fix: guard against non-positive components - ffprobe
                        # reports "0:1" for unknown SAR, which previously
                        # produced display_width == 0.
                        if sar_width > 0 and sar_height > 0 and sar_width != sar_height:
                            self.display_width = int(
                                self.width * sar_width / sar_height
                            )
                            self.display_height = self.height

                    # Extract rotation from side_data_list (common for iPhone videos)
                    side_data_list = stream.get("side_data_list", [])
                    for side_data in side_data_list:
                        if side_data.get("side_data_type") == "Display Matrix":
                            rotation = side_data.get("rotation", 0)
                            # ffprobe may report this as a float; store as int
                            self.rotation = int(rotation)
        except (
            subprocess.TimeoutExpired,
            FileNotFoundError,
            json.JSONDecodeError,
            ValueError,
        ):
            # If ffprobe fails or its output is malformed, keep original
            # dimensions (square pixels) and zero rotation.
            pass

    def read_frame(self) -> np.ndarray | None:
        """
        Read next frame from video and apply rotation if needed.

        OpenCV ignores rotation metadata, so we manually apply rotation
        based on the display matrix metadata extracted from the video.

        Returns:
            The next frame (BGR ndarray) with rotation applied, or None at
            end of stream / on read failure.
        """
        ret, frame = self.cap.read()
        if not ret:
            return None

        # Apply rotation if video has rotation metadata
        if self.rotation == -90 or self.rotation == 270:
            # -90 degrees = rotate 90 degrees clockwise
            frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        elif self.rotation == 90 or self.rotation == -270:
            # 90 degrees = rotate 90 degrees counter-clockwise
            frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
        elif self.rotation == 180 or self.rotation == -180:
            # 180 degrees rotation
            frame = cv2.rotate(frame, cv2.ROTATE_180)

        return frame

    def reset(self) -> None:
        """Reset video to beginning."""
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

    def close(self) -> None:
        """Release video capture."""
        self.cap.release()

    def __enter__(self) -> "VideoProcessor":
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:  # type: ignore[no-untyped-def]
        self.close()
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
"""Drop jump analysis module."""
|
|
2
|
+
|
|
3
|
+
from .analysis import (
|
|
4
|
+
ContactState,
|
|
5
|
+
calculate_adaptive_threshold,
|
|
6
|
+
compute_average_foot_position,
|
|
7
|
+
detect_ground_contact,
|
|
8
|
+
find_interpolated_phase_transitions_with_curvature,
|
|
9
|
+
interpolate_threshold_crossing,
|
|
10
|
+
refine_transition_with_curvature,
|
|
11
|
+
)
|
|
12
|
+
from .debug_overlay import DebugOverlayRenderer
|
|
13
|
+
from .kinematics import DropJumpMetrics, calculate_drop_jump_metrics
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
# Contact detection
|
|
17
|
+
"ContactState",
|
|
18
|
+
"detect_ground_contact",
|
|
19
|
+
"compute_average_foot_position",
|
|
20
|
+
"calculate_adaptive_threshold",
|
|
21
|
+
"interpolate_threshold_crossing",
|
|
22
|
+
"refine_transition_with_curvature",
|
|
23
|
+
"find_interpolated_phase_transitions_with_curvature",
|
|
24
|
+
# Metrics
|
|
25
|
+
"DropJumpMetrics",
|
|
26
|
+
"calculate_drop_jump_metrics",
|
|
27
|
+
# Debug overlay
|
|
28
|
+
"DebugOverlayRenderer",
|
|
29
|
+
]
|