kinemotion 0.12.1__tar.gz → 0.12.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic; see the package registry's advisory for more details.

Files changed (72)
  1. {kinemotion-0.12.1 → kinemotion-0.12.3}/CHANGELOG.md +44 -0
  2. {kinemotion-0.12.1 → kinemotion-0.12.3}/PKG-INFO +1 -1
  3. {kinemotion-0.12.1 → kinemotion-0.12.3}/examples/programmatic_usage.py +3 -26
  4. {kinemotion-0.12.1 → kinemotion-0.12.3}/pyproject.toml +1 -1
  5. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/api.py +0 -2
  6. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/core/auto_tuning.py +66 -30
  7. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/core/pose.py +134 -95
  8. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/core/smoothing.py +2 -2
  9. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/core/video_io.py +53 -29
  10. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/dropjump/analysis.py +205 -123
  11. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/dropjump/cli.py +60 -57
  12. kinemotion-0.12.3/src/kinemotion/dropjump/debug_overlay.py +179 -0
  13. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/dropjump/kinematics.py +3 -6
  14. {kinemotion-0.12.1 → kinemotion-0.12.3}/uv.lock +1 -1
  15. kinemotion-0.12.1/src/kinemotion/dropjump/debug_overlay.py +0 -167
  16. {kinemotion-0.12.1 → kinemotion-0.12.3}/.dockerignore +0 -0
  17. {kinemotion-0.12.1 → kinemotion-0.12.3}/.github/ISSUE_TEMPLATE/bug_report.yml +0 -0
  18. {kinemotion-0.12.1 → kinemotion-0.12.3}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  19. {kinemotion-0.12.1 → kinemotion-0.12.3}/.github/ISSUE_TEMPLATE/feature_request.yml +0 -0
  20. {kinemotion-0.12.1 → kinemotion-0.12.3}/.github/pull_request_template.md +0 -0
  21. {kinemotion-0.12.1 → kinemotion-0.12.3}/.github/workflows/release.yml +0 -0
  22. {kinemotion-0.12.1 → kinemotion-0.12.3}/.gitignore +0 -0
  23. {kinemotion-0.12.1 → kinemotion-0.12.3}/.pre-commit-config.yaml +0 -0
  24. {kinemotion-0.12.1 → kinemotion-0.12.3}/.tool-versions +0 -0
  25. {kinemotion-0.12.1 → kinemotion-0.12.3}/CLAUDE.md +0 -0
  26. {kinemotion-0.12.1 → kinemotion-0.12.3}/CODE_OF_CONDUCT.md +0 -0
  27. {kinemotion-0.12.1 → kinemotion-0.12.3}/CONTRIBUTING.md +0 -0
  28. {kinemotion-0.12.1 → kinemotion-0.12.3}/Dockerfile +0 -0
  29. {kinemotion-0.12.1 → kinemotion-0.12.3}/GEMINI.md +0 -0
  30. {kinemotion-0.12.1 → kinemotion-0.12.3}/LICENSE +0 -0
  31. {kinemotion-0.12.1 → kinemotion-0.12.3}/README.md +0 -0
  32. {kinemotion-0.12.1 → kinemotion-0.12.3}/SECURITY.md +0 -0
  33. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/BULK_PROCESSING.md +0 -0
  34. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/CAMERA_SETUP.md +0 -0
  35. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/CAMERA_SETUP_ES.md +0 -0
  36. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/CMJ_GUIDE.md +0 -0
  37. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/ERRORS_FINDINGS.md +0 -0
  38. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/FRAMERATE.md +0 -0
  39. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/IMU_METADATA_PRESERVATION.md +0 -0
  40. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/PARAMETERS.md +0 -0
  41. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/REAL_TIME_ANALYSIS.md +0 -0
  42. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/TRIPLE_EXTENSION.md +0 -0
  43. {kinemotion-0.12.1 → kinemotion-0.12.3}/docs/VALIDATION_PLAN.md +0 -0
  44. {kinemotion-0.12.1 → kinemotion-0.12.3}/examples/bulk/README.md +0 -0
  45. {kinemotion-0.12.1 → kinemotion-0.12.3}/examples/bulk/bulk_processing.py +0 -0
  46. {kinemotion-0.12.1 → kinemotion-0.12.3}/examples/bulk/simple_example.py +0 -0
  47. {kinemotion-0.12.1 → kinemotion-0.12.3}/samples/cmjs/README.md +0 -0
  48. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/__init__.py +0 -0
  49. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/cli.py +0 -0
  50. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/cmj/__init__.py +0 -0
  51. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/cmj/analysis.py +0 -0
  52. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/cmj/cli.py +0 -0
  53. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/cmj/debug_overlay.py +0 -0
  54. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/cmj/joint_angles.py +0 -0
  55. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/cmj/kinematics.py +0 -0
  56. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/core/__init__.py +0 -0
  57. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/core/cli_utils.py +0 -0
  58. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/core/debug_overlay_utils.py +0 -0
  59. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/core/filtering.py +0 -0
  60. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/dropjump/__init__.py +0 -0
  61. {kinemotion-0.12.1 → kinemotion-0.12.3}/src/kinemotion/py.typed +0 -0
  62. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/__init__.py +0 -0
  63. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/test_adaptive_threshold.py +0 -0
  64. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/test_api.py +0 -0
  65. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/test_aspect_ratio.py +0 -0
  66. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/test_cmj_analysis.py +0 -0
  67. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/test_cmj_kinematics.py +0 -0
  68. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/test_com_estimation.py +0 -0
  69. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/test_contact_detection.py +0 -0
  70. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/test_filtering.py +0 -0
  71. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/test_kinematics.py +0 -0
  72. {kinemotion-0.12.1 → kinemotion-0.12.3}/tests/test_polyorder.py +0 -0
@@ -7,6 +7,50 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
7
7
 
8
8
  <!-- version list -->
9
9
 
10
+ ## v0.12.3 (2025-11-06)
11
+
12
+ ### Bug Fixes
13
+
14
+ - Resolve SonarCloud cognitive complexity violations
15
+ ([`5b20c48`](https://github.com/feniix/kinemotion/commit/5b20c488e058ac3628b0e20847d3fe2539a687c4))
16
+
17
+ ### Refactoring
18
+
19
+ - **core**: Reduce cognitive complexity in video_io and auto_tuning
20
+ ([`14076fe`](https://github.com/feniix/kinemotion/commit/14076fe9d1f9b41ef2ff9bd643b17cf566e18654))
21
+
22
+ - **dropjump**: Add shared utility for foot position extraction
23
+ ([`5222cc4`](https://github.com/feniix/kinemotion/commit/5222cc471b9f4406116de0b7fc193f07d21cd88a))
24
+
25
+ - **dropjump**: Reduce cognitive complexity in CLI functions
26
+ ([`6fc887f`](https://github.com/feniix/kinemotion/commit/6fc887f6288e870a306aa1e3ffc7b8a46c21c3fc))
27
+
28
+ - **examples**: Simplify programmatic usage with shared utility
29
+ ([`5e1bc19`](https://github.com/feniix/kinemotion/commit/5e1bc194f5784a24cfcbc7e6372ebd26a95225aa))
30
+
31
+
32
+ ## v0.12.2 (2025-11-06)
33
+
34
+ ### Bug Fixes
35
+
36
+ - **core**: Suppress false positive for polyorder parameter
37
+ ([`ae5ffea`](https://github.com/feniix/kinemotion/commit/ae5ffea708741592e1cd356cdf35dcc388cbe97f))
38
+
39
+ - **dropjump**: Remove unused parameters from calculate_drop_jump_metrics
40
+ ([`6130c11`](https://github.com/feniix/kinemotion/commit/6130c113be71dcd8c278b1f31a3b5e300a6b4532))
41
+
42
+ ### Refactoring
43
+
44
+ - **core**: Reduce cognitive complexity in pose.py
45
+ ([`f0a3805`](https://github.com/feniix/kinemotion/commit/f0a380561844e54b4372f57c93b82f8c8a1440ee))
46
+
47
+ - **dropjump**: Reduce cognitive complexity in analysis.py
48
+ ([`180bb37`](https://github.com/feniix/kinemotion/commit/180bb373f63675ef6ecacaea8e9ee9f63c3d3746))
49
+
50
+ - **dropjump**: Reduce cognitive complexity in debug_overlay.py
51
+ ([`076cb56`](https://github.com/feniix/kinemotion/commit/076cb560c55baaff0ba93d0631eb38d69f8a7d7b))
52
+
53
+
10
54
  ## v0.12.1 (2025-11-06)
11
55
 
12
56
  ### Bug Fixes
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: kinemotion
3
- Version: 0.12.1
3
+ Version: 0.12.3
4
4
  Summary: Video-based kinematic analysis for athletic performance
5
5
  Project-URL: Homepage, https://github.com/feniix/kinemotion
6
6
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -2,14 +2,12 @@
2
2
 
3
3
  from typing import Any
4
4
 
5
- import numpy as np
6
-
7
5
  from kinemotion.core.pose import PoseTracker
8
6
  from kinemotion.core.smoothing import smooth_landmarks
9
7
  from kinemotion.core.video_io import VideoProcessor
10
8
  from kinemotion.dropjump.analysis import (
11
- compute_average_foot_position,
12
9
  detect_ground_contact,
10
+ extract_foot_positions_and_visibilities,
13
11
  )
14
12
  from kinemotion.dropjump.kinematics import calculate_drop_jump_metrics
15
13
 
@@ -48,29 +46,8 @@ def analyze_video(video_path: str) -> dict[str, Any]:
48
46
  # Smooth landmarks
49
47
  smoothed = smooth_landmarks(landmarks_sequence, window_length=5)
50
48
 
51
- # Extract foot positions
52
- foot_positions_list: list[float] = []
53
- visibilities_list: list[float] = []
54
-
55
- for frame_landmarks in smoothed:
56
- if frame_landmarks:
57
- _, foot_y = compute_average_foot_position(frame_landmarks)
58
- foot_positions_list.append(foot_y)
59
-
60
- # Average foot visibility
61
- foot_vis = []
62
- for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
63
- if key in frame_landmarks:
64
- foot_vis.append(frame_landmarks[key][2])
65
- visibilities_list.append(float(np.mean(foot_vis)) if foot_vis else 0.0)
66
- else:
67
- foot_positions_list.append(
68
- foot_positions_list[-1] if foot_positions_list else 0.5
69
- )
70
- visibilities_list.append(0.0)
71
-
72
- foot_positions: np.ndarray[Any, Any] = np.array(foot_positions_list)
73
- visibilities: np.ndarray[Any, Any] = np.array(visibilities_list)
49
+ # Extract foot positions and visibilities using shared utility
50
+ foot_positions, visibilities = extract_foot_positions_and_visibilities(smoothed)
74
51
 
75
52
  # Detect contact
76
53
  contact_states = detect_ground_contact(
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "kinemotion"
3
- version = "0.12.1"
3
+ version = "0.12.3"
4
4
  description = "Video-based kinematic analysis for athletic performance"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.10,<3.13"
@@ -463,13 +463,11 @@ def process_video(
463
463
  contact_states,
464
464
  vertical_positions,
465
465
  video.fps,
466
- drop_height_m=None,
467
466
  drop_start_frame=drop_start_frame,
468
467
  velocity_threshold=params.velocity_threshold,
469
468
  smoothing_window=params.smoothing_window,
470
469
  polyorder=params.polyorder,
471
470
  use_curvature=params.use_curvature,
472
- kinematic_correction_factor=1.0,
473
471
  )
474
472
 
475
473
  # Generate outputs (JSON and debug video)
@@ -216,6 +216,59 @@ def auto_tune_parameters(
216
216
  )
217
217
 
218
218
 
219
+ def _collect_foot_visibility_and_positions(
220
+ frame_landmarks: dict[str, tuple[float, float, float]],
221
+ ) -> tuple[list[float], list[float]]:
222
+ """
223
+ Collect visibility scores and Y positions from foot landmarks.
224
+
225
+ Args:
226
+ frame_landmarks: Landmarks for a single frame
227
+
228
+ Returns:
229
+ Tuple of (visibility_scores, y_positions)
230
+ """
231
+ foot_keys = [
232
+ "left_ankle",
233
+ "right_ankle",
234
+ "left_heel",
235
+ "right_heel",
236
+ "left_foot_index",
237
+ "right_foot_index",
238
+ ]
239
+
240
+ frame_vis = []
241
+ frame_y_positions = []
242
+
243
+ for key in foot_keys:
244
+ if key in frame_landmarks:
245
+ _, y, vis = frame_landmarks[key] # x not needed for analysis
246
+ frame_vis.append(vis)
247
+ frame_y_positions.append(y)
248
+
249
+ return frame_vis, frame_y_positions
250
+
251
+
252
+ def _check_stable_period(positions: list[float]) -> bool:
253
+ """
254
+ Check if video has a stable period at the start.
255
+
256
+ A stable period (low variance in first 30 frames) indicates
257
+ the subject is standing on an elevated platform before jumping.
258
+
259
+ Args:
260
+ positions: List of average Y positions per frame
261
+
262
+ Returns:
263
+ True if stable period detected, False otherwise
264
+ """
265
+ if len(positions) < 30:
266
+ return False
267
+
268
+ first_30_std = float(np.std(positions[:30]))
269
+ return first_30_std < 0.01 # Very stable = on platform
270
+
271
+
219
272
  def analyze_video_sample(
220
273
  landmarks_sequence: list[dict[str, tuple[float, float, float]] | None],
221
274
  fps: float,
@@ -235,35 +288,22 @@ def analyze_video_sample(
235
288
  Returns:
236
289
  VideoCharacteristics with analyzed properties
237
290
  """
238
- # Calculate average landmark visibility
239
291
  visibilities = []
240
292
  positions = []
241
293
 
294
+ # Collect visibility and position data from all frames
242
295
  for frame_landmarks in landmarks_sequence:
243
- if frame_landmarks:
244
- # Collect visibility scores from foot landmarks
245
- foot_keys = [
246
- "left_ankle",
247
- "right_ankle",
248
- "left_heel",
249
- "right_heel",
250
- "left_foot_index",
251
- "right_foot_index",
252
- ]
253
-
254
- frame_vis = []
255
- frame_y_positions = []
256
-
257
- for key in foot_keys:
258
- if key in frame_landmarks:
259
- _, y, vis = frame_landmarks[key] # x not needed for analysis
260
- frame_vis.append(vis)
261
- frame_y_positions.append(y)
262
-
263
- if frame_vis:
264
- visibilities.append(float(np.mean(frame_vis)))
265
- if frame_y_positions:
266
- positions.append(float(np.mean(frame_y_positions)))
296
+ if not frame_landmarks:
297
+ continue
298
+
299
+ frame_vis, frame_y_positions = _collect_foot_visibility_and_positions(
300
+ frame_landmarks
301
+ )
302
+
303
+ if frame_vis:
304
+ visibilities.append(float(np.mean(frame_vis)))
305
+ if frame_y_positions:
306
+ positions.append(float(np.mean(frame_y_positions)))
267
307
 
268
308
  # Compute metrics
269
309
  avg_visibility = float(np.mean(visibilities)) if visibilities else 0.5
@@ -273,11 +313,7 @@ def analyze_video_sample(
273
313
  tracking_quality = analyze_tracking_quality(avg_visibility)
274
314
 
275
315
  # Check for stable period (indicates drop jump from elevated platform)
276
- # Simple check: do first 30 frames have low variance?
277
- has_stable_period = False
278
- if len(positions) >= 30:
279
- first_30_std = float(np.std(positions[:30]))
280
- has_stable_period = first_30_std < 0.01 # Very stable = on platform
316
+ has_stable_period = _check_stable_period(positions)
281
317
 
282
318
  return VideoCharacteristics(
283
319
  fps=fps,
@@ -81,6 +81,100 @@ class PoseTracker:
81
81
  self.pose.close()
82
82
 
83
83
 
84
+ def _add_head_segment(
85
+ segments: list,
86
+ weights: list,
87
+ visibilities: list,
88
+ landmarks: dict[str, tuple[float, float, float]],
89
+ vis_threshold: float,
90
+ ) -> None:
91
+ """Add head segment (8% body mass) if visible."""
92
+ if "nose" in landmarks:
93
+ x, y, vis = landmarks["nose"]
94
+ if vis > vis_threshold:
95
+ segments.append((x, y))
96
+ weights.append(0.08)
97
+ visibilities.append(vis)
98
+
99
+
100
+ def _add_trunk_segment(
101
+ segments: list,
102
+ weights: list,
103
+ visibilities: list,
104
+ landmarks: dict[str, tuple[float, float, float]],
105
+ vis_threshold: float,
106
+ ) -> None:
107
+ """Add trunk segment (50% body mass) if visible."""
108
+ trunk_keys = ["left_shoulder", "right_shoulder", "left_hip", "right_hip"]
109
+ trunk_pos = [
110
+ (x, y, vis)
111
+ for key in trunk_keys
112
+ if key in landmarks
113
+ for x, y, vis in [landmarks[key]]
114
+ if vis > vis_threshold
115
+ ]
116
+ if len(trunk_pos) >= 2:
117
+ trunk_x = float(np.mean([p[0] for p in trunk_pos]))
118
+ trunk_y = float(np.mean([p[1] for p in trunk_pos]))
119
+ trunk_vis = float(np.mean([p[2] for p in trunk_pos]))
120
+ segments.append((trunk_x, trunk_y))
121
+ weights.append(0.50)
122
+ visibilities.append(trunk_vis)
123
+
124
+
125
+ def _add_limb_segment(
126
+ segments: list,
127
+ weights: list,
128
+ visibilities: list,
129
+ landmarks: dict[str, tuple[float, float, float]],
130
+ side: str,
131
+ proximal_key: str,
132
+ distal_key: str,
133
+ segment_weight: float,
134
+ vis_threshold: float,
135
+ ) -> None:
136
+ """Add a limb segment (thigh or lower leg) if both endpoints visible."""
137
+ prox_full = f"{side}_{proximal_key}"
138
+ dist_full = f"{side}_{distal_key}"
139
+
140
+ if prox_full in landmarks and dist_full in landmarks:
141
+ px, py, pvis = landmarks[prox_full]
142
+ dx, dy, dvis = landmarks[dist_full]
143
+ if pvis > vis_threshold and dvis > vis_threshold:
144
+ seg_x = (px + dx) / 2
145
+ seg_y = (py + dy) / 2
146
+ seg_vis = (pvis + dvis) / 2
147
+ segments.append((seg_x, seg_y))
148
+ weights.append(segment_weight)
149
+ visibilities.append(seg_vis)
150
+
151
+
152
+ def _add_foot_segment(
153
+ segments: list,
154
+ weights: list,
155
+ visibilities: list,
156
+ landmarks: dict[str, tuple[float, float, float]],
157
+ side: str,
158
+ vis_threshold: float,
159
+ ) -> None:
160
+ """Add foot segment (1.5% body mass per foot) if visible."""
161
+ foot_keys = [f"{side}_ankle", f"{side}_heel", f"{side}_foot_index"]
162
+ foot_pos = [
163
+ (x, y, vis)
164
+ for key in foot_keys
165
+ if key in landmarks
166
+ for x, y, vis in [landmarks[key]]
167
+ if vis > vis_threshold
168
+ ]
169
+ if foot_pos:
170
+ foot_x = float(np.mean([p[0] for p in foot_pos]))
171
+ foot_y = float(np.mean([p[1] for p in foot_pos]))
172
+ foot_vis = float(np.mean([p[2] for p in foot_pos]))
173
+ segments.append((foot_x, foot_y))
174
+ weights.append(0.015)
175
+ visibilities.append(foot_vis)
176
+
177
+
84
178
  def compute_center_of_mass(
85
179
  landmarks: dict[str, tuple[float, float, float]],
86
180
  visibility_threshold: float = 0.5,
@@ -106,114 +200,59 @@ def compute_center_of_mass(
106
200
  (x, y, visibility) tuple for estimated CoM position
107
201
  visibility = average visibility of all segments used
108
202
  """
109
- # Define segment representatives and their weights (as fraction of body mass)
110
- # Each segment uses midpoint or average of its bounding landmarks
111
- segments = []
112
- segment_weights = []
113
- visibilities = []
203
+ segments: list = []
204
+ weights: list = []
205
+ visibilities: list = []
114
206
 
115
- # Head segment: 8% (use nose as proxy)
116
- if "nose" in landmarks:
117
- x, y, vis = landmarks["nose"]
118
- if vis > visibility_threshold:
119
- segments.append((x, y))
120
- segment_weights.append(0.08)
121
- visibilities.append(vis)
207
+ # Add body segments
208
+ _add_head_segment(segments, weights, visibilities, landmarks, visibility_threshold)
209
+ _add_trunk_segment(segments, weights, visibilities, landmarks, visibility_threshold)
122
210
 
123
- # Trunk segment: 50% (midpoint between shoulders and hips)
124
- trunk_landmarks = ["left_shoulder", "right_shoulder", "left_hip", "right_hip"]
125
- trunk_positions = [
126
- (x, y, vis)
127
- for key in trunk_landmarks
128
- if key in landmarks
129
- for x, y, vis in [landmarks[key]]
130
- if vis > visibility_threshold
131
- ]
132
- if len(trunk_positions) >= 2:
133
- trunk_x = float(np.mean([pos[0] for pos in trunk_positions]))
134
- trunk_y = float(np.mean([pos[1] for pos in trunk_positions]))
135
- trunk_vis = float(np.mean([pos[2] for pos in trunk_positions]))
136
- segments.append((trunk_x, trunk_y))
137
- segment_weights.append(0.50)
138
- visibilities.append(trunk_vis)
139
-
140
- # Thigh segment: 20% total (midpoint hip to knee for each leg)
211
+ # Add bilateral limb segments
141
212
  for side in ["left", "right"]:
142
- hip_key = f"{side}_hip"
143
- knee_key = f"{side}_knee"
144
- if hip_key in landmarks and knee_key in landmarks:
145
- hip_x, hip_y, hip_vis = landmarks[hip_key]
146
- knee_x, knee_y, knee_vis = landmarks[knee_key]
147
- if hip_vis > visibility_threshold and knee_vis > visibility_threshold:
148
- thigh_x = (hip_x + knee_x) / 2
149
- thigh_y = (hip_y + knee_y) / 2
150
- thigh_vis = (hip_vis + knee_vis) / 2
151
- segments.append((thigh_x, thigh_y))
152
- segment_weights.append(0.10) # 10% per leg
153
- visibilities.append(thigh_vis)
154
-
155
- # Lower leg segment: 10% total (midpoint knee to ankle for each leg)
156
- for side in ["left", "right"]:
157
- knee_key = f"{side}_knee"
158
- ankle_key = f"{side}_ankle"
159
- if knee_key in landmarks and ankle_key in landmarks:
160
- knee_x, knee_y, knee_vis = landmarks[knee_key]
161
- ankle_x, ankle_y, ankle_vis = landmarks[ankle_key]
162
- if knee_vis > visibility_threshold and ankle_vis > visibility_threshold:
163
- leg_x = (knee_x + ankle_x) / 2
164
- leg_y = (knee_y + ankle_y) / 2
165
- leg_vis = (knee_vis + ankle_vis) / 2
166
- segments.append((leg_x, leg_y))
167
- segment_weights.append(0.05) # 5% per leg
168
- visibilities.append(leg_vis)
169
-
170
- # Foot segment: 3% total (average of ankle, heel, foot_index)
171
- for side in ["left", "right"]:
172
- foot_keys = [f"{side}_ankle", f"{side}_heel", f"{side}_foot_index"]
173
- foot_positions = [
174
- (x, y, vis)
175
- for key in foot_keys
176
- if key in landmarks
177
- for x, y, vis in [landmarks[key]]
178
- if vis > visibility_threshold
179
- ]
180
- if foot_positions:
181
- foot_x = float(np.mean([pos[0] for pos in foot_positions]))
182
- foot_y = float(np.mean([pos[1] for pos in foot_positions]))
183
- foot_vis = float(np.mean([pos[2] for pos in foot_positions]))
184
- segments.append((foot_x, foot_y))
185
- segment_weights.append(0.015) # 1.5% per foot
186
- visibilities.append(foot_vis)
187
-
188
- # If no segments found, fall back to hip average
213
+ _add_limb_segment(
214
+ segments,
215
+ weights,
216
+ visibilities,
217
+ landmarks,
218
+ side,
219
+ "hip",
220
+ "knee",
221
+ 0.10,
222
+ visibility_threshold,
223
+ )
224
+ _add_limb_segment(
225
+ segments,
226
+ weights,
227
+ visibilities,
228
+ landmarks,
229
+ side,
230
+ "knee",
231
+ "ankle",
232
+ 0.05,
233
+ visibility_threshold,
234
+ )
235
+ _add_foot_segment(
236
+ segments, weights, visibilities, landmarks, side, visibility_threshold
237
+ )
238
+
239
+ # Fallback if no segments found
189
240
  if not segments:
190
241
  if "left_hip" in landmarks and "right_hip" in landmarks:
191
242
  lh_x, lh_y, lh_vis = landmarks["left_hip"]
192
243
  rh_x, rh_y, rh_vis = landmarks["right_hip"]
193
- return (
194
- (lh_x + rh_x) / 2,
195
- (lh_y + rh_y) / 2,
196
- (lh_vis + rh_vis) / 2,
197
- )
198
- # Ultimate fallback: center of frame
244
+ return ((lh_x + rh_x) / 2, (lh_y + rh_y) / 2, (lh_vis + rh_vis) / 2)
199
245
  return (0.5, 0.5, 0.0)
200
246
 
201
- # Normalize weights to sum to 1.0
202
- total_weight = sum(segment_weights)
203
- normalized_weights = [w / total_weight for w in segment_weights]
247
+ # Normalize weights and compute weighted average
248
+ total_weight = sum(weights)
249
+ normalized_weights = [w / total_weight for w in weights]
204
250
 
205
- # Compute weighted average of segment positions
206
251
  com_x = float(
207
- sum(
208
- pos[0] * weight
209
- for pos, weight in zip(segments, normalized_weights, strict=True)
210
- )
252
+ sum(p[0] * w for p, w in zip(segments, normalized_weights, strict=True))
211
253
  )
212
254
  com_y = float(
213
- sum(
214
- pos[1] * weight
215
- for pos, weight in zip(segments, normalized_weights, strict=True)
216
- )
255
+ sum(p[1] * w for p, w in zip(segments, normalized_weights, strict=True))
217
256
  )
218
257
  com_visibility = float(np.mean(visibilities)) if visibilities else 0.0
219
258
 
@@ -117,7 +117,7 @@ def _store_smoothed_landmarks(
117
117
  )
118
118
 
119
119
 
120
- def _smooth_landmarks_core(
120
+ def _smooth_landmarks_core( # NOSONAR(S1172) - polyorder used via closure capture in smoother_fn
121
121
  landmark_sequence: list[dict[str, tuple[float, float, float]] | None],
122
122
  window_length: int,
123
123
  polyorder: int,
@@ -129,7 +129,7 @@ def _smooth_landmarks_core(
129
129
  Args:
130
130
  landmark_sequence: List of landmark dictionaries from each frame
131
131
  window_length: Length of filter window (must be odd)
132
- polyorder: Order of polynomial used to fit samples
132
+ polyorder: Order of polynomial used to fit samples (captured by smoother_fn closure)
133
133
  smoother_fn: Function that takes (x_coords, y_coords, valid_frames)
134
134
  and returns (x_smooth, y_smooth)
135
135
 
@@ -65,6 +65,43 @@ class VideoProcessor:
65
65
  self.display_width,
66
66
  )
67
67
 
68
+ def _parse_sample_aspect_ratio(self, sar_str: str) -> None:
69
+ """
70
+ Parse SAR string and update display dimensions.
71
+
72
+ Args:
73
+ sar_str: SAR string in format "width:height" (e.g., "270:473")
74
+ """
75
+ if not sar_str or ":" not in sar_str:
76
+ return
77
+
78
+ sar_parts = sar_str.split(":")
79
+ sar_width = int(sar_parts[0])
80
+ sar_height = int(sar_parts[1])
81
+
82
+ # Calculate display dimensions if pixels are non-square
83
+ # DAR = (width * SAR_width) / (height * SAR_height)
84
+ if sar_width != sar_height:
85
+ self.display_width = int(self.width * sar_width / sar_height)
86
+ self.display_height = self.height
87
+
88
+ def _extract_rotation_from_stream(self, stream: dict) -> int: # type: ignore[type-arg]
89
+ """
90
+ Extract rotation metadata from video stream.
91
+
92
+ Args:
93
+ stream: ffprobe stream dictionary
94
+
95
+ Returns:
96
+ Rotation angle in degrees (0, 90, -90, 180)
97
+ """
98
+ side_data_list = stream.get("side_data_list", [])
99
+ for side_data in side_data_list:
100
+ if side_data.get("side_data_type") == "Display Matrix":
101
+ rotation = side_data.get("rotation", 0)
102
+ return int(rotation)
103
+ return 0
104
+
68
105
  def _extract_video_metadata(self) -> None:
69
106
  """
70
107
  Extract video metadata including SAR and rotation using ffprobe.
@@ -94,35 +131,22 @@ class VideoProcessor:
94
131
  timeout=5,
95
132
  )
96
133
 
97
- if result.returncode == 0:
98
- data = json.loads(result.stdout)
99
- if "streams" in data and len(data["streams"]) > 0:
100
- stream = data["streams"][0]
101
-
102
- # Extract SAR (Sample Aspect Ratio)
103
- sar_str = stream.get("sample_aspect_ratio", "1:1")
104
-
105
- # Parse SAR (e.g., "270:473")
106
- if sar_str and ":" in sar_str:
107
- sar_parts = sar_str.split(":")
108
- sar_width = int(sar_parts[0])
109
- sar_height = int(sar_parts[1])
110
-
111
- # Calculate display dimensions
112
- # DAR = (width * SAR_width) / (height * SAR_height)
113
- if sar_width != sar_height:
114
- self.display_width = int(
115
- self.width * sar_width / sar_height
116
- )
117
- self.display_height = self.height
118
-
119
- # Extract rotation from side_data_list (common for iPhone videos)
120
- side_data_list = stream.get("side_data_list", [])
121
- for side_data in side_data_list:
122
- if side_data.get("side_data_type") == "Display Matrix":
123
- rotation = side_data.get("rotation", 0)
124
- # Convert to int and normalize to 0, 90, -90, 180
125
- self.rotation = int(rotation)
134
+ if result.returncode != 0:
135
+ return
136
+
137
+ data = json.loads(result.stdout)
138
+ if "streams" not in data or len(data["streams"]) == 0:
139
+ return
140
+
141
+ stream = data["streams"][0]
142
+
143
+ # Extract and parse SAR (Sample Aspect Ratio)
144
+ sar_str = stream.get("sample_aspect_ratio", "1:1")
145
+ self._parse_sample_aspect_ratio(sar_str)
146
+
147
+ # Extract rotation from side_data_list (common for iPhone videos)
148
+ self.rotation = self._extract_rotation_from_stream(stream)
149
+
126
150
  except (subprocess.TimeoutExpired, FileNotFoundError, json.JSONDecodeError):
127
151
  # If ffprobe fails, keep original dimensions (square pixels)
128
152
  pass