kinemotion 0.12.0__tar.gz → 0.12.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic.

Files changed (72)
  1. {kinemotion-0.12.0 → kinemotion-0.12.2}/CHANGELOG.md +38 -0
  2. {kinemotion-0.12.0 → kinemotion-0.12.2}/PKG-INFO +1 -1
  3. {kinemotion-0.12.0 → kinemotion-0.12.2}/examples/bulk/bulk_processing.py +3 -10
  4. {kinemotion-0.12.0 → kinemotion-0.12.2}/examples/bulk/simple_example.py +5 -7
  5. {kinemotion-0.12.0 → kinemotion-0.12.2}/pyproject.toml +1 -1
  6. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/api.py +0 -2
  7. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/core/cli_utils.py +0 -2
  8. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/core/pose.py +134 -95
  9. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/core/smoothing.py +2 -2
  10. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/dropjump/analysis.py +169 -123
  11. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/dropjump/cli.py +0 -2
  12. kinemotion-0.12.2/src/kinemotion/dropjump/debug_overlay.py +179 -0
  13. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/dropjump/kinematics.py +3 -69
  14. {kinemotion-0.12.0 → kinemotion-0.12.2}/uv.lock +1 -1
  15. kinemotion-0.12.0/src/kinemotion/dropjump/debug_overlay.py +0 -167
  16. {kinemotion-0.12.0 → kinemotion-0.12.2}/.dockerignore +0 -0
  17. {kinemotion-0.12.0 → kinemotion-0.12.2}/.github/ISSUE_TEMPLATE/bug_report.yml +0 -0
  18. {kinemotion-0.12.0 → kinemotion-0.12.2}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  19. {kinemotion-0.12.0 → kinemotion-0.12.2}/.github/ISSUE_TEMPLATE/feature_request.yml +0 -0
  20. {kinemotion-0.12.0 → kinemotion-0.12.2}/.github/pull_request_template.md +0 -0
  21. {kinemotion-0.12.0 → kinemotion-0.12.2}/.github/workflows/release.yml +0 -0
  22. {kinemotion-0.12.0 → kinemotion-0.12.2}/.gitignore +0 -0
  23. {kinemotion-0.12.0 → kinemotion-0.12.2}/.pre-commit-config.yaml +0 -0
  24. {kinemotion-0.12.0 → kinemotion-0.12.2}/.tool-versions +0 -0
  25. {kinemotion-0.12.0 → kinemotion-0.12.2}/CLAUDE.md +0 -0
  26. {kinemotion-0.12.0 → kinemotion-0.12.2}/CODE_OF_CONDUCT.md +0 -0
  27. {kinemotion-0.12.0 → kinemotion-0.12.2}/CONTRIBUTING.md +0 -0
  28. {kinemotion-0.12.0 → kinemotion-0.12.2}/Dockerfile +0 -0
  29. {kinemotion-0.12.0 → kinemotion-0.12.2}/GEMINI.md +0 -0
  30. {kinemotion-0.12.0 → kinemotion-0.12.2}/LICENSE +0 -0
  31. {kinemotion-0.12.0 → kinemotion-0.12.2}/README.md +0 -0
  32. {kinemotion-0.12.0 → kinemotion-0.12.2}/SECURITY.md +0 -0
  33. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/BULK_PROCESSING.md +0 -0
  34. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/CAMERA_SETUP.md +0 -0
  35. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/CAMERA_SETUP_ES.md +0 -0
  36. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/CMJ_GUIDE.md +0 -0
  37. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/ERRORS_FINDINGS.md +0 -0
  38. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/FRAMERATE.md +0 -0
  39. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/IMU_METADATA_PRESERVATION.md +0 -0
  40. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/PARAMETERS.md +0 -0
  41. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/REAL_TIME_ANALYSIS.md +0 -0
  42. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/TRIPLE_EXTENSION.md +0 -0
  43. {kinemotion-0.12.0 → kinemotion-0.12.2}/docs/VALIDATION_PLAN.md +0 -0
  44. {kinemotion-0.12.0 → kinemotion-0.12.2}/examples/bulk/README.md +0 -0
  45. {kinemotion-0.12.0 → kinemotion-0.12.2}/examples/programmatic_usage.py +0 -0
  46. {kinemotion-0.12.0 → kinemotion-0.12.2}/samples/cmjs/README.md +0 -0
  47. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/__init__.py +0 -0
  48. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/cli.py +0 -0
  49. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/cmj/__init__.py +0 -0
  50. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/cmj/analysis.py +0 -0
  51. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/cmj/cli.py +0 -0
  52. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/cmj/debug_overlay.py +0 -0
  53. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/cmj/joint_angles.py +0 -0
  54. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/cmj/kinematics.py +0 -0
  55. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/core/__init__.py +0 -0
  56. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/core/auto_tuning.py +0 -0
  57. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/core/debug_overlay_utils.py +0 -0
  58. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/core/filtering.py +0 -0
  59. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/core/video_io.py +0 -0
  60. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/dropjump/__init__.py +0 -0
  61. {kinemotion-0.12.0 → kinemotion-0.12.2}/src/kinemotion/py.typed +0 -0
  62. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/__init__.py +0 -0
  63. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/test_adaptive_threshold.py +0 -0
  64. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/test_api.py +0 -0
  65. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/test_aspect_ratio.py +0 -0
  66. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/test_cmj_analysis.py +0 -0
  67. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/test_cmj_kinematics.py +0 -0
  68. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/test_com_estimation.py +0 -0
  69. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/test_contact_detection.py +0 -0
  70. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/test_filtering.py +0 -0
  71. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/test_kinematics.py +0 -0
  72. {kinemotion-0.12.0 → kinemotion-0.12.2}/tests/test_polyorder.py +0 -0
@@ -7,6 +7,44 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
  <!-- version list -->
 
+ ## v0.12.2 (2025-11-06)
+
+ ### Bug Fixes
+
+ - **core**: Suppress false positive for polyorder parameter
+   ([`ae5ffea`](https://github.com/feniix/kinemotion/commit/ae5ffea708741592e1cd356cdf35dcc388cbe97f))
+
+ - **dropjump**: Remove unused parameters from calculate_drop_jump_metrics
+   ([`6130c11`](https://github.com/feniix/kinemotion/commit/6130c113be71dcd8c278b1f31a3b5e300a6b4532))
+
+ ### Refactoring
+
+ - **core**: Reduce cognitive complexity in pose.py
+   ([`f0a3805`](https://github.com/feniix/kinemotion/commit/f0a380561844e54b4372f57c93b82f8c8a1440ee))
+
+ - **dropjump**: Reduce cognitive complexity in analysis.py
+   ([`180bb37`](https://github.com/feniix/kinemotion/commit/180bb373f63675ef6ecacaea8e9ee9f63c3d3746))
+
+ - **dropjump**: Reduce cognitive complexity in debug_overlay.py
+   ([`076cb56`](https://github.com/feniix/kinemotion/commit/076cb560c55baaff0ba93d0631eb38d69f8a7d7b))
+
+
+ ## v0.12.1 (2025-11-06)
+
+ ### Bug Fixes
+
+ - **core**: Remove unreachable duplicate return statement
+   ([`294115d`](https://github.com/feniix/kinemotion/commit/294115da761b2851ecc4405a6503138851a56ad1))
+
+ - **examples**: Remove drop_height from API examples
+   ([`f3da09e`](https://github.com/feniix/kinemotion/commit/f3da09ef4ab050b13b80b9fdd8c7734e4556647a))
+
+ ### Refactoring
+
+ - **dropjump**: Remove unused calibration parameters
+   ([`1a7572c`](https://github.com/feniix/kinemotion/commit/1a7572c83ff4e990e39dcb96ff61220adf40818e))
+
+
  ## v0.12.0 (2025-11-06)
 
  ### Documentation
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kinemotion
- Version: 0.12.0
+ Version: 0.12.2
  Summary: Video-based kinematic analysis for athletic performance
  Project-URL: Homepage, https://github.com/feniix/kinemotion
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -18,9 +18,9 @@ def example_simple_bulk() -> None:
      print("=" * 80)
 
      video_configs = [
-         VideoConfig(video_path="video1.mp4", drop_height=0.40),
-         VideoConfig(video_path="video2.mp4", drop_height=0.30),
-         VideoConfig(video_path="video3.mp4", drop_height=0.50),
+         VideoConfig(video_path="video1.mp4"),
+         VideoConfig(video_path="video2.mp4"),
+         VideoConfig(video_path="video3.mp4"),
      ]
 
      # Process videos with 4 parallel workers
@@ -41,21 +41,18 @@ def example_advanced_configuration() -> None:
          # Fast analysis for quick screening
          VideoConfig(
              video_path="athlete1_trial1.mp4",
-             drop_height=0.40,
              quality="fast",
              json_output="results/athlete1_trial1.json",
          ),
          # Balanced analysis (default)
          VideoConfig(
              video_path="athlete1_trial2.mp4",
-             drop_height=0.40,
              quality="balanced",
              json_output="results/athlete1_trial2.json",
          ),
          # Research-grade accurate analysis with debug video
          VideoConfig(
              video_path="athlete1_trial3.mp4",
-             drop_height=0.40,
              quality="accurate",
              output_video="debug/athlete1_trial3_debug.mp4",
              json_output="results/athlete1_trial3.json",
@@ -101,7 +98,6 @@ def example_process_directory() -> list[VideoResult]:
      dir_configs = [
          VideoConfig(
              video_path=str(video_file),
-             drop_height=0.40,
              quality="balanced",
              json_output=f"results/{video_file.stem}.json",
          )
@@ -187,7 +183,6 @@ def example_custom_parameters() -> None:
          # Low quality video - use more aggressive smoothing
          VideoConfig(
              video_path="low_quality.mp4",
-             drop_height=0.40,
              smoothing_window=7,  # More smoothing
              velocity_threshold=0.025,  # Higher threshold
              quality="accurate",
@@ -195,7 +190,6 @@ def example_custom_parameters() -> None:
          # High speed video - adjust for higher framerate
          VideoConfig(
              video_path="high_speed_120fps.mp4",
-             drop_height=0.40,
              quality="accurate",
              # Auto-tuning will handle FPS adjustments
          ),
@@ -290,7 +284,6 @@ def example_single_video() -> None:
      # Process single video with verbose output
      metrics = process_video(
          video_path="sample.mp4",
-         drop_height=0.40,
          quality="balanced",
          output_video="sample_debug.mp4",
          json_output="sample_results.json",
@@ -12,10 +12,9 @@ def process_single_video_example() -> None:
      """Process a single video - the simplest usage."""
      print("Processing single video...")
 
-     # Process with just the required parameters
+     # Process with just the video path
      metrics = process_video(
          video_path="my_video.mp4",
-         drop_height=0.40,  # 40cm drop box
          verbose=True,
      )
 
@@ -36,10 +35,10 @@ def process_multiple_videos_example() -> None:
 
      # Configure videos to process
      configs = [
-         VideoConfig("athlete1_jump1.mp4", drop_height=0.40),
-         VideoConfig("athlete1_jump2.mp4", drop_height=0.40),
-         VideoConfig("athlete1_jump3.mp4", drop_height=0.40),
-         VideoConfig("athlete2_jump1.mp4", drop_height=0.30),  # Different drop height
+         VideoConfig("athlete1_jump1.mp4"),
+         VideoConfig("athlete1_jump2.mp4"),
+         VideoConfig("athlete1_jump3.mp4"),
+         VideoConfig("athlete2_jump1.mp4", quality="accurate"),
      ]
 
      # Process all videos using 4 parallel workers
@@ -74,7 +73,6 @@ def process_with_outputs_example() -> None:
 
      metrics = process_video(
          video_path="my_video.mp4",
-         drop_height=0.40,
          output_video="debug_output.mp4",  # Save annotated video
          json_output="results.json",  # Save metrics as JSON
          quality="accurate",  # Use highest quality analysis
@@ -1,6 +1,6 @@
  [project]
  name = "kinemotion"
- version = "0.12.0"
+ version = "0.12.2"
  description = "Video-based kinematic analysis for athletic performance"
  readme = "README.md"
  requires-python = ">=3.10,<3.13"
@@ -463,13 +463,11 @@ def process_video(
          contact_states,
          vertical_positions,
          video.fps,
-         drop_height_m=None,
          drop_start_frame=drop_start_frame,
          velocity_threshold=params.velocity_threshold,
          smoothing_window=params.smoothing_window,
          polyorder=params.polyorder,
          use_curvature=params.use_curvature,
-         kinematic_correction_factor=1.0,
      )
 
      # Generate outputs (JSON and debug video)
@@ -210,5 +210,3 @@ def common_output_options(func: Callable) -> Callable: # type: ignore[type-arg]
          help="Path for JSON metrics output (default: stdout)",
      )(func)
      return func
- 
-     return func
@@ -81,6 +81,100 @@ class PoseTracker:
          self.pose.close()
 
 
+ def _add_head_segment(
+     segments: list,
+     weights: list,
+     visibilities: list,
+     landmarks: dict[str, tuple[float, float, float]],
+     vis_threshold: float,
+ ) -> None:
+     """Add head segment (8% body mass) if visible."""
+     if "nose" in landmarks:
+         x, y, vis = landmarks["nose"]
+         if vis > vis_threshold:
+             segments.append((x, y))
+             weights.append(0.08)
+             visibilities.append(vis)
+
+
+ def _add_trunk_segment(
+     segments: list,
+     weights: list,
+     visibilities: list,
+     landmarks: dict[str, tuple[float, float, float]],
+     vis_threshold: float,
+ ) -> None:
+     """Add trunk segment (50% body mass) if visible."""
+     trunk_keys = ["left_shoulder", "right_shoulder", "left_hip", "right_hip"]
+     trunk_pos = [
+         (x, y, vis)
+         for key in trunk_keys
+         if key in landmarks
+         for x, y, vis in [landmarks[key]]
+         if vis > vis_threshold
+     ]
+     if len(trunk_pos) >= 2:
+         trunk_x = float(np.mean([p[0] for p in trunk_pos]))
+         trunk_y = float(np.mean([p[1] for p in trunk_pos]))
+         trunk_vis = float(np.mean([p[2] for p in trunk_pos]))
+         segments.append((trunk_x, trunk_y))
+         weights.append(0.50)
+         visibilities.append(trunk_vis)
+
+
+ def _add_limb_segment(
+     segments: list,
+     weights: list,
+     visibilities: list,
+     landmarks: dict[str, tuple[float, float, float]],
+     side: str,
+     proximal_key: str,
+     distal_key: str,
+     segment_weight: float,
+     vis_threshold: float,
+ ) -> None:
+     """Add a limb segment (thigh or lower leg) if both endpoints visible."""
+     prox_full = f"{side}_{proximal_key}"
+     dist_full = f"{side}_{distal_key}"
+
+     if prox_full in landmarks and dist_full in landmarks:
+         px, py, pvis = landmarks[prox_full]
+         dx, dy, dvis = landmarks[dist_full]
+         if pvis > vis_threshold and dvis > vis_threshold:
+             seg_x = (px + dx) / 2
+             seg_y = (py + dy) / 2
+             seg_vis = (pvis + dvis) / 2
+             segments.append((seg_x, seg_y))
+             weights.append(segment_weight)
+             visibilities.append(seg_vis)
+
+
+ def _add_foot_segment(
+     segments: list,
+     weights: list,
+     visibilities: list,
+     landmarks: dict[str, tuple[float, float, float]],
+     side: str,
+     vis_threshold: float,
+ ) -> None:
+     """Add foot segment (1.5% body mass per foot) if visible."""
+     foot_keys = [f"{side}_ankle", f"{side}_heel", f"{side}_foot_index"]
+     foot_pos = [
+         (x, y, vis)
+         for key in foot_keys
+         if key in landmarks
+         for x, y, vis in [landmarks[key]]
+         if vis > vis_threshold
+     ]
+     if foot_pos:
+         foot_x = float(np.mean([p[0] for p in foot_pos]))
+         foot_y = float(np.mean([p[1] for p in foot_pos]))
+         foot_vis = float(np.mean([p[2] for p in foot_pos]))
+         segments.append((foot_x, foot_y))
+         weights.append(0.015)
+         visibilities.append(foot_vis)
+
+
  def compute_center_of_mass(
      landmarks: dict[str, tuple[float, float, float]],
      visibility_threshold: float = 0.5,
@@ -106,114 +200,59 @@ def compute_center_of_mass(
          (x, y, visibility) tuple for estimated CoM position
          visibility = average visibility of all segments used
      """
-     # Define segment representatives and their weights (as fraction of body mass)
-     # Each segment uses midpoint or average of its bounding landmarks
-     segments = []
-     segment_weights = []
-     visibilities = []
+     segments: list = []
+     weights: list = []
+     visibilities: list = []
 
-     # Head segment: 8% (use nose as proxy)
-     if "nose" in landmarks:
-         x, y, vis = landmarks["nose"]
-         if vis > visibility_threshold:
-             segments.append((x, y))
-             segment_weights.append(0.08)
-             visibilities.append(vis)
+     # Add body segments
+     _add_head_segment(segments, weights, visibilities, landmarks, visibility_threshold)
+     _add_trunk_segment(segments, weights, visibilities, landmarks, visibility_threshold)
 
-     # Trunk segment: 50% (midpoint between shoulders and hips)
-     trunk_landmarks = ["left_shoulder", "right_shoulder", "left_hip", "right_hip"]
-     trunk_positions = [
-         (x, y, vis)
-         for key in trunk_landmarks
-         if key in landmarks
-         for x, y, vis in [landmarks[key]]
-         if vis > visibility_threshold
-     ]
-     if len(trunk_positions) >= 2:
-         trunk_x = float(np.mean([pos[0] for pos in trunk_positions]))
-         trunk_y = float(np.mean([pos[1] for pos in trunk_positions]))
-         trunk_vis = float(np.mean([pos[2] for pos in trunk_positions]))
-         segments.append((trunk_x, trunk_y))
-         segment_weights.append(0.50)
-         visibilities.append(trunk_vis)
- 
-     # Thigh segment: 20% total (midpoint hip to knee for each leg)
+     # Add bilateral limb segments
      for side in ["left", "right"]:
-         hip_key = f"{side}_hip"
-         knee_key = f"{side}_knee"
-         if hip_key in landmarks and knee_key in landmarks:
-             hip_x, hip_y, hip_vis = landmarks[hip_key]
-             knee_x, knee_y, knee_vis = landmarks[knee_key]
-             if hip_vis > visibility_threshold and knee_vis > visibility_threshold:
-                 thigh_x = (hip_x + knee_x) / 2
-                 thigh_y = (hip_y + knee_y) / 2
-                 thigh_vis = (hip_vis + knee_vis) / 2
-                 segments.append((thigh_x, thigh_y))
-                 segment_weights.append(0.10)  # 10% per leg
-                 visibilities.append(thigh_vis)
- 
-     # Lower leg segment: 10% total (midpoint knee to ankle for each leg)
-     for side in ["left", "right"]:
-         knee_key = f"{side}_knee"
-         ankle_key = f"{side}_ankle"
-         if knee_key in landmarks and ankle_key in landmarks:
-             knee_x, knee_y, knee_vis = landmarks[knee_key]
-             ankle_x, ankle_y, ankle_vis = landmarks[ankle_key]
-             if knee_vis > visibility_threshold and ankle_vis > visibility_threshold:
-                 leg_x = (knee_x + ankle_x) / 2
-                 leg_y = (knee_y + ankle_y) / 2
-                 leg_vis = (knee_vis + ankle_vis) / 2
-                 segments.append((leg_x, leg_y))
-                 segment_weights.append(0.05)  # 5% per leg
-                 visibilities.append(leg_vis)
- 
-     # Foot segment: 3% total (average of ankle, heel, foot_index)
-     for side in ["left", "right"]:
-         foot_keys = [f"{side}_ankle", f"{side}_heel", f"{side}_foot_index"]
-         foot_positions = [
-             (x, y, vis)
-             for key in foot_keys
-             if key in landmarks
-             for x, y, vis in [landmarks[key]]
-             if vis > visibility_threshold
-         ]
-         if foot_positions:
-             foot_x = float(np.mean([pos[0] for pos in foot_positions]))
-             foot_y = float(np.mean([pos[1] for pos in foot_positions]))
-             foot_vis = float(np.mean([pos[2] for pos in foot_positions]))
-             segments.append((foot_x, foot_y))
-             segment_weights.append(0.015)  # 1.5% per foot
-             visibilities.append(foot_vis)
- 
-     # If no segments found, fall back to hip average
+         _add_limb_segment(
+             segments,
+             weights,
+             visibilities,
+             landmarks,
+             side,
+             "hip",
+             "knee",
+             0.10,
+             visibility_threshold,
+         )
+         _add_limb_segment(
+             segments,
+             weights,
+             visibilities,
+             landmarks,
+             side,
+             "knee",
+             "ankle",
+             0.05,
+             visibility_threshold,
+         )
+         _add_foot_segment(
+             segments, weights, visibilities, landmarks, side, visibility_threshold
+         )
+
+     # Fallback if no segments found
      if not segments:
          if "left_hip" in landmarks and "right_hip" in landmarks:
              lh_x, lh_y, lh_vis = landmarks["left_hip"]
              rh_x, rh_y, rh_vis = landmarks["right_hip"]
-             return (
-                 (lh_x + rh_x) / 2,
-                 (lh_y + rh_y) / 2,
-                 (lh_vis + rh_vis) / 2,
-             )
-         # Ultimate fallback: center of frame
+             return ((lh_x + rh_x) / 2, (lh_y + rh_y) / 2, (lh_vis + rh_vis) / 2)
          return (0.5, 0.5, 0.0)
 
-     # Normalize weights to sum to 1.0
-     total_weight = sum(segment_weights)
-     normalized_weights = [w / total_weight for w in segment_weights]
+     # Normalize weights and compute weighted average
+     total_weight = sum(weights)
+     normalized_weights = [w / total_weight for w in weights]
 
-     # Compute weighted average of segment positions
      com_x = float(
-         sum(
-             pos[0] * weight
-             for pos, weight in zip(segments, normalized_weights, strict=True)
-         )
+         sum(p[0] * w for p, w in zip(segments, normalized_weights, strict=True))
      )
      com_y = float(
-         sum(
-             pos[1] * weight
-             for pos, weight in zip(segments, normalized_weights, strict=True)
-         )
+         sum(p[1] * w for p, w in zip(segments, normalized_weights, strict=True))
      )
      com_visibility = float(np.mean(visibilities)) if visibilities else 0.0
 
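Note on the pose.py refactor above: the segmental weighting model is unchanged (head 8%, trunk 50%, each thigh 10%, each lower leg 5%, each foot 1.5%); the per-segment logic simply moves into helper functions, and segments below the visibility threshold are skipped with the remaining weights renormalized. The sketch below is a minimal, hypothetical usage of `compute_center_of_mass` with made-up landmark values; the import path is assumed from the `src/kinemotion/core/pose.py` layout shown in this diff.

```python
# Minimal sketch: synthetic, normalized (x, y, visibility) landmarks.
# Values are illustrative only; real input comes from the pose tracker.
# Import path assumed from the package layout shown in this diff.
from kinemotion.core.pose import compute_center_of_mass

landmarks = {
    "nose": (0.50, 0.20, 0.99),
    "left_shoulder": (0.45, 0.35, 0.98),
    "right_shoulder": (0.55, 0.35, 0.98),
    "left_hip": (0.46, 0.55, 0.97),
    "right_hip": (0.54, 0.55, 0.97),
    "left_knee": (0.46, 0.72, 0.95),
    "right_knee": (0.54, 0.72, 0.95),
    "left_ankle": (0.46, 0.88, 0.90),
    "right_ankle": (0.54, 0.88, 0.90),
}

# Segments whose landmarks fall below the visibility threshold are skipped,
# and the remaining segment weights are renormalized before averaging.
com_x, com_y, com_vis = compute_center_of_mass(landmarks, visibility_threshold=0.5)
print(f"CoM: ({com_x:.3f}, {com_y:.3f}), visibility {com_vis:.2f}")
```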
@@ -117,7 +117,7 @@ def _store_smoothed_landmarks(
      )
 
 
- def _smooth_landmarks_core(
+ def _smooth_landmarks_core(  # NOSONAR(S1172) - polyorder used via closure capture in smoother_fn
      landmark_sequence: list[dict[str, tuple[float, float, float]] | None],
      window_length: int,
      polyorder: int,
@@ -129,7 +129,7 @@ def _smooth_landmarks_core(
      Args:
          landmark_sequence: List of landmark dictionaries from each frame
          window_length: Length of filter window (must be odd)
-         polyorder: Order of polynomial used to fit samples
+         polyorder: Order of polynomial used to fit samples (captured by smoother_fn closure)
          smoother_fn: Function that takes (x_coords, y_coords, valid_frames)
              and returns (x_smooth, y_smooth)
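Note on the NOSONAR(S1172) suppression above: `polyorder` is not referenced directly inside `_smooth_landmarks_core`; it is captured earlier by the closure that builds `smoother_fn`, so the parameter looks unused to static analysis. The sketch below illustrates that closure pattern under assumed names (`make_smoother` is hypothetical and not part of kinemotion); it is not the library's actual implementation.

```python
# Hypothetical sketch (not kinemotion's actual code): the caller bakes
# window_length and polyorder into smoother_fn, so the core smoothing
# routine receives polyorder only for documentation and never reads it,
# which is what triggers Sonar rule S1172.
import numpy as np
from scipy.signal import savgol_filter


def make_smoother(window_length: int, polyorder: int):
    """Build a smoother_fn(x_coords, y_coords, valid_frames) closure."""

    def smoother_fn(
        x_coords: np.ndarray, y_coords: np.ndarray, valid_frames: np.ndarray
    ) -> tuple[np.ndarray, np.ndarray]:
        # polyorder is used here via closure capture, not via the caller's
        # explicit argument list.
        x_smooth = x_coords.copy()
        y_smooth = y_coords.copy()
        x_smooth[valid_frames] = savgol_filter(
            x_coords[valid_frames], window_length, polyorder
        )
        y_smooth[valid_frames] = savgol_filter(
            y_coords[valid_frames], window_length, polyorder
        )
        return x_smooth, y_smooth

    return smoother_fn
```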