kinemotion 0.76.3__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of kinemotion might be problematic; see the registry listing for details.

Files changed (53)
  1. kinemotion/__init__.py +3 -18
  2. kinemotion/api.py +7 -27
  3. kinemotion/cli.py +2 -4
  4. kinemotion/{countermovement_jump → cmj}/analysis.py +158 -16
  5. kinemotion/{countermovement_jump → cmj}/api.py +18 -46
  6. kinemotion/{countermovement_jump → cmj}/cli.py +46 -6
  7. kinemotion/cmj/debug_overlay.py +457 -0
  8. kinemotion/{countermovement_jump → cmj}/joint_angles.py +31 -96
  9. kinemotion/{countermovement_jump → cmj}/metrics_validator.py +293 -184
  10. kinemotion/{countermovement_jump → cmj}/validation_bounds.py +18 -1
  11. kinemotion/core/__init__.py +2 -11
  12. kinemotion/core/auto_tuning.py +107 -149
  13. kinemotion/core/cli_utils.py +0 -74
  14. kinemotion/core/debug_overlay_utils.py +15 -142
  15. kinemotion/core/experimental.py +51 -55
  16. kinemotion/core/filtering.py +56 -116
  17. kinemotion/core/pipeline_utils.py +2 -2
  18. kinemotion/core/pose.py +98 -47
  19. kinemotion/core/quality.py +6 -4
  20. kinemotion/core/smoothing.py +51 -65
  21. kinemotion/core/types.py +0 -15
  22. kinemotion/core/validation.py +7 -76
  23. kinemotion/core/video_io.py +27 -41
  24. kinemotion/{drop_jump → dropjump}/__init__.py +8 -2
  25. kinemotion/{drop_jump → dropjump}/analysis.py +120 -282
  26. kinemotion/{drop_jump → dropjump}/api.py +33 -59
  27. kinemotion/{drop_jump → dropjump}/cli.py +136 -70
  28. kinemotion/dropjump/debug_overlay.py +182 -0
  29. kinemotion/{drop_jump → dropjump}/kinematics.py +65 -175
  30. kinemotion/{drop_jump → dropjump}/metrics_validator.py +51 -25
  31. kinemotion/{drop_jump → dropjump}/validation_bounds.py +1 -1
  32. kinemotion/models/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.onnx +3 -0
  33. kinemotion/models/yolox_tiny_8xb8-300e_humanart-6f3252f9.onnx +3 -0
  34. {kinemotion-0.76.3.dist-info → kinemotion-1.0.0.dist-info}/METADATA +26 -75
  35. kinemotion-1.0.0.dist-info/RECORD +49 -0
  36. kinemotion/core/overlay_constants.py +0 -61
  37. kinemotion/core/video_analysis_base.py +0 -132
  38. kinemotion/countermovement_jump/debug_overlay.py +0 -325
  39. kinemotion/drop_jump/debug_overlay.py +0 -241
  40. kinemotion/squat_jump/__init__.py +0 -5
  41. kinemotion/squat_jump/analysis.py +0 -377
  42. kinemotion/squat_jump/api.py +0 -610
  43. kinemotion/squat_jump/cli.py +0 -309
  44. kinemotion/squat_jump/debug_overlay.py +0 -163
  45. kinemotion/squat_jump/kinematics.py +0 -342
  46. kinemotion/squat_jump/metrics_validator.py +0 -438
  47. kinemotion/squat_jump/validation_bounds.py +0 -221
  48. kinemotion-0.76.3.dist-info/RECORD +0 -57
  49. /kinemotion/{countermovement_jump → cmj}/__init__.py +0 -0
  50. /kinemotion/{countermovement_jump → cmj}/kinematics.py +0 -0
  51. {kinemotion-0.76.3.dist-info → kinemotion-1.0.0.dist-info}/WHEEL +0 -0
  52. {kinemotion-0.76.3.dist-info → kinemotion-1.0.0.dist-info}/entry_points.txt +0 -0
  53. {kinemotion-0.76.3.dist-info → kinemotion-1.0.0.dist-info}/licenses/LICENSE +0 -0
kinemotion/core/experimental.py CHANGED
@@ -1,6 +1,6 @@
-"""Decorator for marking unused features.
+"""Decorators for marking experimental and unused features.
 
-This decorator helps identify code that is implemented but not yet
+These decorators help identify code that is implemented but not yet
 integrated into the main pipeline, making it easier to track features
 for future enhancement or cleanup.
 """
@@ -13,14 +13,61 @@ from typing import TypeVar
 F = TypeVar("F", bound=Callable)
 
 
+def experimental(
+    reason: str, *, issue: int | None = None, since: str | None = None
+) -> Callable[[F], F]:
+    """Mark a feature as experimental/not fully integrated.
+
+    Experimental features are working implementations that haven't been
+    fully integrated into the main pipeline. They emit warnings when called
+    to alert developers they're using untested/unstable APIs.
+
+    Args:
+        reason: Why this is experimental (e.g., "API unstable", "needs validation")
+        issue: Optional GitHub issue number for tracking integration
+        since: Optional version when this became experimental
+
+    Example:
+        >>> @experimental("API may change", issue=123, since="0.34.0")
+        ... def new_feature():
+        ...     pass
+
+    Returns:
+        Decorated function that warns on use
+    """
+
+    def decorator(func: F) -> F:
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):  # type: ignore
+            msg = f"{func.__name__} is experimental: {reason}"
+            if issue:
+                msg += f" (GitHub issue #{issue})"
+            if since:
+                msg += f" [since v{since}]"
+            warnings.warn(msg, FutureWarning, stacklevel=2)
+            return func(*args, **kwargs)
+
+        # Add metadata for documentation/tooling
+        wrapper.__experimental__ = True  # type: ignore[attr-defined]
+        wrapper.__experimental_reason__ = reason  # type: ignore[attr-defined]
+        if issue:
+            wrapper.__experimental_issue__ = issue  # type: ignore[attr-defined]
+        if since:
+            wrapper.__experimental_since__ = since  # type: ignore[attr-defined]
+
+        return wrapper  # type: ignore[return-value]
+
+    return decorator
+
+
 def unused(
     reason: str, *, remove_in: str | None = None, since: str | None = None
 ) -> Callable[[F], F]:
     """Mark a feature as implemented but not integrated into pipeline.
 
     Unused features are fully working implementations that aren't called
-    by the main analysis pipeline. These don't emit warnings when called
-    (they work fine), but are marked for tracking.
+    by the main analysis pipeline. Unlike @experimental, these don't emit
+    warnings when called (they work fine), but are marked for tracking.
 
     Use this for:
     - Features awaiting CLI integration
@@ -54,54 +101,3 @@ def unused
         return func
 
     return decorator
-
-
-def experimental(
-    reason: str, *, issue: int | None = None, since: str | None = None
-) -> Callable[[F], F]:
-    """Mark a feature as experimental/not fully integrated.
-
-    Experimental features are working implementations that may change
-    or be removed. They emit a warning when called to alert users.
-
-    Use this for:
-    - Features under active development
-    - APIs that may change
-    - Functionality that needs more testing
-
-    Args:
-        reason: Why this is experimental (e.g., "API may change")
-        issue: Optional GitHub issue number tracking this feature
-        since: Optional version when this became experimental
-
-    Example:
-        >>> @experimental("API may change", issue=42, since="0.35.0")
-        ... def new_analysis_method():
-        ...     pass
-
-    Returns:
-        Wrapped function that emits ExperimentalWarning when called
-    """
-
-    def decorator(func: F) -> F:
-        @functools.wraps(func)
-        def wrapper(*args, **kwargs):  # type: ignore[no-untyped-def]
-            issue_ref = f" (see issue #{issue})" if issue else ""
-            version_ref = f" since {since}" if since else ""
-            warnings.warn(
-                f"{func.__name__} is experimental{version_ref}: {reason}{issue_ref}",
-                category=FutureWarning,
-                stacklevel=2,
-            )
-            return func(*args, **kwargs)
-
-        wrapper.__experimental__ = True  # type: ignore[attr-defined]
-        wrapper.__experimental_reason__ = reason  # type: ignore[attr-defined]
-        if issue:
-            wrapper.__experimental_issue__ = issue  # type: ignore[attr-defined]
-        if since:
-            wrapper.__experimental_since__ = since  # type: ignore[attr-defined]
-
-        return wrapper  # type: ignore[return-value]
-
-    return decorator
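
The two hunks above relocate experimental() ahead of unused() and reword its FutureWarning message. A minimal usage sketch of the new behavior, based only on the added code (the function name and argument values here are illustrative):

import warnings

from kinemotion.core.experimental import experimental

@experimental("needs validation", issue=123, since="1.0.0")
def prototype_metric() -> float:  # hypothetical example function
    return 0.0

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    prototype_metric()

# New message format:
#   "prototype_metric is experimental: needs validation (GitHub issue #123) [since v1.0.0]"
assert issubclass(caught[0].category, FutureWarning)
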
kinemotion/core/filtering.py CHANGED
@@ -1,25 +1,11 @@
 """Advanced filtering techniques for robust trajectory processing."""
 
 import numpy as np
-from numpy.lib.stride_tricks import sliding_window_view
-from scipy.ndimage import convolve1d
 from scipy.signal import medfilt
 
 from .experimental import unused
 
 
-def _ensure_odd_window_length(window_length: int) -> int:
-    """Ensure window_length is odd (required for Savitzky-Golay filter).
-
-    Args:
-        window_length: Desired window length
-
-    Returns:
-        Odd window length (increments by 1 if even)
-    """
-    return window_length + 1 if window_length % 2 == 0 else window_length
-
-
 def detect_outliers_ransac(
     positions: np.ndarray,
     window_size: int = 15,
@@ -33,8 +19,6 @@ def detect_outliers_ransac(
     from a polynomial fit of nearby points. This catches MediaPipe tracking glitches
     where landmarks jump to incorrect positions.
 
-    Vectorized implementation using convolution for 10-20x speedup.
-
     Args:
         positions: 1D array of position values (e.g., y-coordinates)
        window_size: Size of sliding window for local fitting
@@ -50,82 +34,41 @@ def detect_outliers_ransac(
     if n < window_size:
         return is_outlier
 
-    window_size = _ensure_odd_window_length(window_size)
-    half_window = window_size // 2
-
-    # For centered quadratic fit, we can compute the predicted value at
-    # the window center using convolution. This is much faster than
-    # calling np.polyfit for each window.
-    #
-    # For a quadratic fit y = ax² + bx + c with centered window:
-    # - Predicted value at center (x=0) is just the intercept c
-    # - c can be computed from sum(y) and sum(x²*y) using precomputed constants
-    #
-    # The key insight: sum(y) and sum(x²*y) are convolution operations!
-
-    # Window indices (centered at 0)
-    x = np.arange(-half_window, half_window + 1)
-
-    # Precompute constants for the normal equations
-    sum_x2 = np.sum(x**2)
-    sum_x4 = np.sum(x**4)
-    det = window_size * sum_x4 - sum_x2**2
-
-    # Handle edge case where determinant is zero (shouldn't happen with valid window)
-    if det == 0:
-        return is_outlier
-
-    # Kernels for convolution
-    ones_kernel = np.ones(window_size)
-    x2_kernel = x**2
-
-    # Pad positions for boundary handling (use edge padding like original)
-    pad_width = half_window
-    padded = np.pad(positions, pad_width, mode="edge")
+    # Ensure window size is odd
+    if window_size % 2 == 0:
+        window_size += 1
 
-    # Compute sums via convolution
-    # sum_y[i] = sum of positions in window centered at i
-    # sum_x2y[i] = sum of (x² * positions) in window centered at i
-    sum_y = convolve1d(padded, ones_kernel, mode="constant")
-    sum_x2y = convolve1d(padded, x2_kernel, mode="constant")
-
-    # Remove padding to match original positions length
-    sum_y = sum_y[pad_width:-pad_width]
-    sum_x2y = sum_x2y[pad_width:-pad_width]
-
-    # Compute predicted values at window centers
-    # For centered fit: predicted = c = (sum_x4 * sum_y - sum_x2 * sum_x2y) / det
-    predicted = (sum_x4 * sum_y - sum_x2 * sum_x2y) / det
-
-    # Calculate residuals
-    residuals = np.abs(positions - predicted)
+    half_window = window_size // 2
 
-    # Mark outliers based on threshold
-    outlier_candidates = residuals > threshold
+    for i in range(n):
+        # Define window around current point
+        start = max(0, i - half_window)
+        end = min(n, i + half_window + 1)
+        window_positions = positions[start:end]
+        window_indices = np.arange(start, end)
 
-    if not np.any(outlier_candidates):
-        return is_outlier
+        if len(window_positions) < 3:
+            continue
 
-    # RANSAC criterion: point is outlier if most OTHER points in window are inliers
-    # Compute fraction of inliers in each window using convolution
-    inlier_mask = (residuals <= threshold).astype(float)
-    inliers_in_window = convolve1d(
-        np.pad(inlier_mask, pad_width, mode="edge"),
-        ones_kernel,
-        mode="constant",
-    )
-    inliers_in_window = inliers_in_window[pad_width:-pad_width]
-
-    # Account for variable window sizes at boundaries
-    # At boundaries, windows are smaller, so we need to adjust the count
-    for i in range(n):
-        actual_window_size = min(i + half_window + 1, n) - max(0, i - half_window)
-        if actual_window_size < 3:
+        # Fit polynomial (quadratic) to window
+        # Use polyfit with degree 2 (parabolic motion)
+        try:
+            coeffs = np.polyfit(window_indices, window_positions, deg=2)
+            predicted = np.polyval(coeffs, window_indices)
+
+            # Calculate residuals
+            residuals = np.abs(window_positions - predicted)
+
+            # Point is outlier if its residual is large
+            local_idx = i - start
+            if local_idx < len(residuals) and residuals[local_idx] > threshold:
+                # Also check if most other points are inliers (RANSAC criterion)
+                inliers = np.sum(residuals <= threshold)
+                if inliers / len(residuals) >= min_inliers:
+                    is_outlier[i] = True
+        except np.linalg.LinAlgError:
+            # Polyfit failed, skip this window
             continue
-        if outlier_candidates[i]:
-            inlier_fraction = inliers_in_window[i] / actual_window_size
-            if inlier_fraction >= min_inliers:
-                is_outlier[i] = True
 
     return is_outlier
 
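For context, a usage sketch of the rewritten detector on synthetic data. Only positions and window_size are visible with defaults in the signature above; threshold and min_inliers are assumed to be keyword parameters since the loop body references them, and all values here are illustrative:

import numpy as np

from kinemotion.core.filtering import detect_outliers_ransac

# Parabolic flight-phase trajectory with one simulated tracking glitch.
t = np.linspace(0.0, 1.0, 60)
y = 0.5 - 4.9 * (t - 0.5) ** 2
y[30] += 0.2  # landmark jump of the kind described in the docstring

outliers = detect_outliers_ransac(y, window_size=15, threshold=0.05)
print(np.where(outliers)[0])  # expected to flag frame 30
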
@@ -150,7 +93,9 @@ def detect_outliers_median(
     if len(positions) < window_size:
         return np.zeros(len(positions), dtype=bool)
 
-    window_size = _ensure_odd_window_length(window_size)
+    # Ensure window size is odd
+    if window_size % 2 == 0:
+        window_size += 1
 
     # Apply median filter
     median_filtered = medfilt(positions, kernel_size=window_size)
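
The remainder of detect_outliers_median lies outside this hunk. The visible lines suggest the standard median-deviation pattern: median-filter the signal, then flag points that deviate from the filtered version by more than a threshold. A hedged sketch of that pattern, not the package's exact code:

import numpy as np
from scipy.signal import medfilt

def median_outliers_sketch(
    positions: np.ndarray, window_size: int = 5, threshold: float = 0.05
) -> np.ndarray:
    # Kernel must be odd for medfilt, mirroring the inline check above.
    if window_size % 2 == 0:
        window_size += 1
    median_filtered = medfilt(positions, kernel_size=window_size)
    # A point is an outlier if it strays too far from the local median.
    return np.abs(positions - median_filtered) > threshold
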
@@ -358,8 +303,6 @@
     1. Temporal distance (like regular smoothing)
     2. Intensity similarity (preserves edges)
 
-    Vectorized implementation using sliding_window_view for 10-30x speedup.
-
     Args:
         positions: 1D array of position values
         window_size: Temporal window size (must be odd)
@@ -370,39 +313,36 @@
         Filtered position array
     """
     n = len(positions)
-    if n == 0:
-        return np.array([])
+    filtered = np.zeros(n)
 
-    window_size = _ensure_odd_window_length(window_size)
-    half_window = window_size // 2
+    # Ensure window size is odd
+    if window_size % 2 == 0:
+        window_size += 1
 
-    # Pad edges with boundary values to maintain consistent window size
-    # This provides context for boundary positions while preserving edge information
-    padded = np.pad(positions, half_window, mode="edge")
+    half_window = window_size // 2
 
-    # Create all sliding windows at once: shape (n, window_size)
-    # Each row represents the window centered at the corresponding input position
-    windows = sliding_window_view(padded, window_size)
+    for i in range(n):
+        # Define window
+        start = max(0, i - half_window)
+        end = min(n, i + half_window + 1)
 
-    # Precompute spatial weights (only depends on distance from center)
-    temporal_indices = np.arange(-half_window, half_window + 1)
-    spatial_weights = np.exp(-(temporal_indices**2) / (2 * sigma_spatial**2))
+        # Get window positions
+        window_pos = positions[start:end]
+        center_pos = positions[i]
 
-    # Extract center positions for intensity weight computation
-    center_positions = windows[:, half_window]  # Shape: (n,)
-    center_positions = center_positions.reshape(-1, 1)  # Shape: (n, 1) for broadcast
+        # Compute spatial (temporal) weights
+        temporal_indices = np.arange(start - i, end - i)
+        spatial_weights = np.exp(-(temporal_indices**2) / (2 * sigma_spatial**2))
 
-    # Compute intensity weights (data-dependent, varies by window)
-    # intensity_diff[i, j] = windows[i, j] - windows[i, center]
-    intensity_diff = windows - center_positions  # Broadcasting: (n, window_size)
-    intensity_weights = np.exp(-(intensity_diff**2) / (2 * sigma_intensity**2))
+        # Compute intensity (position difference) weights
+        intensity_diff = window_pos - center_pos
+        intensity_weights = np.exp(-(intensity_diff**2) / (2 * sigma_intensity**2))
 
-    # Combine weights: spatial_weights broadcasts to (n, window_size)
-    weights = spatial_weights * intensity_weights
-    # Normalize each window's weights to sum to 1
-    weights /= weights.sum(axis=1, keepdims=True)
+        # Combined weights (bilateral)
+        weights = spatial_weights * intensity_weights
+        weights /= np.sum(weights)  # Normalize
 
-    # Compute weighted average for each window
-    filtered = (weights * windows).sum(axis=1)
+        # Weighted average
+        filtered[i] = np.sum(weights * window_pos)
 
     return filtered
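
A usage sketch of the loop-based filter. Defaults are not visible in this hunk, so every parameter is passed explicitly; sigma_spatial and sigma_intensity are the names used in the body above, and the values are illustrative. The point is the edge-preserving behavior described in the docstring:

import numpy as np

from kinemotion.core.filtering import bilateral_temporal_filter

# Step signal: a landing event should stay sharp after filtering.
signal = np.concatenate([np.zeros(30), np.ones(30)])
signal += np.random.default_rng(0).normal(0.0, 0.02, signal.size)

smoothed = bilateral_temporal_filter(
    signal, window_size=9, sigma_spatial=3.0, sigma_intensity=0.1
)
# Noise is reduced, but the 0 -> 1 transition remains steep because the
# intensity weights suppress contributions from across the step.
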
kinemotion/core/pipeline_utils.py CHANGED
@@ -8,8 +8,8 @@ from typing import TypeVar
 import cv2
 import numpy as np
 
-from ..countermovement_jump.analysis import compute_average_hip_position
-from ..drop_jump.analysis import compute_average_foot_position
+from ..cmj.analysis import compute_average_hip_position
+from ..dropjump.analysis import compute_average_foot_position
 from .auto_tuning import AnalysisParameters, QualityPreset, VideoCharacteristics
 from .pose import MediaPipePoseTracker
 from .smoothing import smooth_landmarks, smooth_landmarks_advanced
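
For downstream code, this hunk reflects the package renames listed above (countermovement_jump → cmj, drop_jump → dropjump). Imports change accordingly; the two functions shown are the ones visible in this hunk:

# 0.76.3 (old paths, removed in 1.0.0):
# from kinemotion.countermovement_jump.analysis import compute_average_hip_position
# from kinemotion.drop_jump.analysis import compute_average_foot_position

# 1.0.0 (new paths):
from kinemotion.cmj.analysis import compute_average_hip_position
from kinemotion.dropjump.analysis import compute_average_foot_position
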
kinemotion/core/pose.py CHANGED
@@ -198,6 +198,55 @@ class PoseTrackerFactory:
 
         return MediaPipePoseTracker(**filtered_kwargs)
 
+    @classmethod
+    def get_available_backends(cls) -> list[str]:
+        """Get list of available backends.
+
+        Returns:
+            List containing 'mediapipe'
+        """
+        return ["mediapipe"]
+
+    @classmethod
+    def get_backend_info(cls, backend: str) -> dict[str, str]:
+        """Get information about a backend.
+
+        Args:
+            backend: Backend name
+
+        Returns:
+            Dictionary with backend information
+        """
+        if backend.lower() in ("mediapipe", "mp"):
+            return {
+                "name": "MediaPipe",
+                "description": "Pose tracking using MediaPipe Tasks API",
+                "performance": "~48 FPS",
+                "accuracy": "Reference (validated for jumps)",
+                "requirements": "mediapipe package",
+            }
+        return {}
+
+
+def get_tracker_info(tracker: object) -> str:
+    """Get detailed information about a pose tracker instance.
+
+    Args:
+        tracker: Pose tracker instance
+
+    Returns:
+        Formatted string with tracker details
+    """
+    tracker_class = type(tracker).__name__
+    module = type(tracker).__module__
+
+    info = f"{tracker_class} (from {module})"
+
+    if tracker_class == "MediaPipePoseTracker":
+        info += " [MediaPipe Tasks API]"
+
+    return info
+
 
 def _extract_landmarks_from_results(
     pose_landmarks: mp.tasks.vision.components.containers.NormalizedLandmark,  # type: ignore[valid-type]
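
A short sketch of the new introspection helpers (import path taken from the file header above; the printed text comes from the dictionary in the hunk):

from kinemotion.core.pose import PoseTrackerFactory

# Enumerate backends before constructing a tracker.
for backend in PoseTrackerFactory.get_available_backends():
    info = PoseTrackerFactory.get_backend_info(backend)
    print(f"{info['name']}: {info['description']} ({info['performance']})")
# MediaPipe: Pose tracking using MediaPipe Tasks API (~48 FPS)
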
@@ -224,6 +273,28 @@
     return landmarks
 
 
+# Legacy compatibility aliases for Solution API enum values
+class _LegacyPoseLandmark:
+    """Compatibility shim for Solution API enum values."""
+
+    LEFT_ANKLE = 27
+    RIGHT_ANKLE = 28
+    LEFT_HEEL = 29
+    RIGHT_HEEL = 30
+    LEFT_FOOT_INDEX = 31
+    RIGHT_FOOT_INDEX = 32
+    LEFT_HIP = 23
+    RIGHT_HIP = 24
+    LEFT_SHOULDER = 11
+    RIGHT_SHOULDER = 12
+    NOSE = 0
+    LEFT_KNEE = 25
+    RIGHT_KNEE = 26
+
+
+PoseLandmark = _LegacyPoseLandmark
+
+
 def compute_center_of_mass(
     landmarks: dict[str, tuple[float, float, float]],
@@ -302,37 +373,6 @@
     return (com_x, com_y, com_visibility)
 
 
-def _compute_mean_landmark_position(
-    landmark_keys: list[str],
-    landmarks: dict[str, tuple[float, float, float]],
-    vis_threshold: float,
-) -> tuple[float, float, float] | None:
-    """Compute mean position and visibility from multiple landmarks.
-
-    Args:
-        landmark_keys: List of landmark key names to average
-        landmarks: Dictionary of landmark positions
-        vis_threshold: Minimum visibility threshold
-
-    Returns:
-        (x, y, visibility) tuple if any landmarks are visible, else None
-    """
-    positions = [
-        (x, y, vis)
-        for key in landmark_keys
-        if key in landmarks
-        for x, y, vis in [landmarks[key]]
-        if vis > vis_threshold
-    ]
-    if not positions:
-        return None
-
-    x = float(np.mean([p[0] for p in positions]))
-    y = float(np.mean([p[1] for p in positions]))
-    vis = float(np.mean([p[2] for p in positions]))
-    return (x, y, vis)
-
-
 def _add_head_segment(
     segments: list,
     weights: list,
@@ -358,17 +398,20 @@
 ) -> None:
     """Add trunk segment (50% body mass) if visible."""
     trunk_keys = ["left_shoulder", "right_shoulder", "left_hip", "right_hip"]
-    trunk_pos = _compute_mean_landmark_position(trunk_keys, landmarks, vis_threshold)
-
-    if trunk_pos is not None:
-        # Require at least 2 visible landmarks for valid trunk
-        visible_count = sum(
-            1 for key in trunk_keys if key in landmarks and landmarks[key][2] > vis_threshold
-        )
-        if visible_count >= 2:
-            segments.append((trunk_pos[0], trunk_pos[1]))
-            weights.append(0.50)
-            visibilities.append(trunk_pos[2])
+    trunk_pos = [
+        (x, y, vis)
+        for key in trunk_keys
+        if key in landmarks
+        for x, y, vis in [landmarks[key]]
+        if vis > vis_threshold
+    ]
+    if len(trunk_pos) >= 2:
+        trunk_x = float(np.mean([p[0] for p in trunk_pos]))
+        trunk_y = float(np.mean([p[1] for p in trunk_pos]))
+        trunk_vis = float(np.mean([p[2] for p in trunk_pos]))
+        segments.append((trunk_x, trunk_y))
+        weights.append(0.50)
+        visibilities.append(trunk_vis)
 
 
 def _add_limb_segment(
@@ -408,9 +451,17 @@
 ) -> None:
     """Add foot segment (1.5% body mass per foot) if visible."""
     foot_keys = [f"{side}_ankle", f"{side}_heel", f"{side}_foot_index"]
-    foot_pos = _compute_mean_landmark_position(foot_keys, landmarks, vis_threshold)
-
-    if foot_pos is not None:
-        segments.append((foot_pos[0], foot_pos[1]))
+    foot_pos = [
+        (x, y, vis)
+        for key in foot_keys
+        if key in landmarks
+        for x, y, vis in [landmarks[key]]
+        if vis > vis_threshold
+    ]
+    if foot_pos:
+        foot_x = float(np.mean([p[0] for p in foot_pos]))
+        foot_y = float(np.mean([p[1] for p in foot_pos]))
+        foot_vis = float(np.mean([p[2] for p in foot_pos]))
+        segments.append((foot_x, foot_y))
         weights.append(0.015)
-        visibilities.append(foot_pos[2])
+        visibilities.append(foot_vis)
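
Both this hunk and the trunk hunk inline the same visible-landmark averaging that the deleted _compute_mean_landmark_position helper performed. A self-contained illustration with made-up coordinates:

import numpy as np

landmarks = {
    "left_ankle": (0.48, 0.92, 0.9),
    "left_heel": (0.47, 0.95, 0.8),
    "left_foot_index": (0.50, 0.96, 0.2),  # below threshold, excluded
}
vis_threshold = 0.5

# Same comprehension pattern as the inlined code: keep only visible
# landmarks, then average position and visibility.
foot_pos = [
    (x, y, vis)
    for key in ["left_ankle", "left_heel", "left_foot_index"]
    if key in landmarks
    for x, y, vis in [landmarks[key]]
    if vis > vis_threshold
]
foot_x = float(np.mean([p[0] for p in foot_pos]))  # 0.475, mean of visible x values
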
kinemotion/core/quality.py CHANGED
@@ -86,11 +86,13 @@
     if len(positions) < window_size:
         return float(np.var(positions))
 
-    # Vectorized rolling variance using sliding window view
-    from numpy.lib.stride_tricks import sliding_window_view
+    # Calculate rolling variance
+    rolling_vars = []
+    for i in range(len(positions) - window_size + 1):
+        window = positions[i : i + window_size]
+        rolling_vars.append(np.var(window))
 
-    windows = sliding_window_view(positions, window_size)
-    return float(np.mean(np.var(windows, axis=1)))
+    return float(np.mean(rolling_vars))
 
 
 def assess_tracking_quality(
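
The loop replaces the removed sliding_window_view version but is numerically equivalent; a quick standalone check contrasting the two forms:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

positions = np.random.default_rng(1).normal(0.0, 0.01, 40)
window_size = 5

# Loop form (the new 1.0.0 code).
loop_vars = [
    np.var(positions[i : i + window_size])
    for i in range(len(positions) - window_size + 1)
]

# Vectorized form (removed in 1.0.0) gives the same mean rolling variance.
windows = sliding_window_view(positions, window_size)
assert np.allclose(np.mean(loop_vars), np.mean(np.var(windows, axis=1)))
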