kinemotion 0.12.1__py3-none-any.whl → 0.12.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

kinemotion/api.py CHANGED
@@ -463,13 +463,11 @@ def process_video(
         contact_states,
         vertical_positions,
         video.fps,
-        drop_height_m=None,
         drop_start_frame=drop_start_frame,
         velocity_threshold=params.velocity_threshold,
         smoothing_window=params.smoothing_window,
         polyorder=params.polyorder,
         use_curvature=params.use_curvature,
-        kinematic_correction_factor=1.0,
     )

     # Generate outputs (JSON and debug video)
kinemotion/core/pose.py CHANGED
@@ -81,6 +81,100 @@ class PoseTracker:
         self.pose.close()


+def _add_head_segment(
+    segments: list,
+    weights: list,
+    visibilities: list,
+    landmarks: dict[str, tuple[float, float, float]],
+    vis_threshold: float,
+) -> None:
+    """Add head segment (8% body mass) if visible."""
+    if "nose" in landmarks:
+        x, y, vis = landmarks["nose"]
+        if vis > vis_threshold:
+            segments.append((x, y))
+            weights.append(0.08)
+            visibilities.append(vis)
+
+
+def _add_trunk_segment(
+    segments: list,
+    weights: list,
+    visibilities: list,
+    landmarks: dict[str, tuple[float, float, float]],
+    vis_threshold: float,
+) -> None:
+    """Add trunk segment (50% body mass) if visible."""
+    trunk_keys = ["left_shoulder", "right_shoulder", "left_hip", "right_hip"]
+    trunk_pos = [
+        (x, y, vis)
+        for key in trunk_keys
+        if key in landmarks
+        for x, y, vis in [landmarks[key]]
+        if vis > vis_threshold
+    ]
+    if len(trunk_pos) >= 2:
+        trunk_x = float(np.mean([p[0] for p in trunk_pos]))
+        trunk_y = float(np.mean([p[1] for p in trunk_pos]))
+        trunk_vis = float(np.mean([p[2] for p in trunk_pos]))
+        segments.append((trunk_x, trunk_y))
+        weights.append(0.50)
+        visibilities.append(trunk_vis)
+
+
+def _add_limb_segment(
+    segments: list,
+    weights: list,
+    visibilities: list,
+    landmarks: dict[str, tuple[float, float, float]],
+    side: str,
+    proximal_key: str,
+    distal_key: str,
+    segment_weight: float,
+    vis_threshold: float,
+) -> None:
+    """Add a limb segment (thigh or lower leg) if both endpoints visible."""
+    prox_full = f"{side}_{proximal_key}"
+    dist_full = f"{side}_{distal_key}"
+
+    if prox_full in landmarks and dist_full in landmarks:
+        px, py, pvis = landmarks[prox_full]
+        dx, dy, dvis = landmarks[dist_full]
+        if pvis > vis_threshold and dvis > vis_threshold:
+            seg_x = (px + dx) / 2
+            seg_y = (py + dy) / 2
+            seg_vis = (pvis + dvis) / 2
+            segments.append((seg_x, seg_y))
+            weights.append(segment_weight)
+            visibilities.append(seg_vis)
+
+
+def _add_foot_segment(
+    segments: list,
+    weights: list,
+    visibilities: list,
+    landmarks: dict[str, tuple[float, float, float]],
+    side: str,
+    vis_threshold: float,
+) -> None:
+    """Add foot segment (1.5% body mass per foot) if visible."""
+    foot_keys = [f"{side}_ankle", f"{side}_heel", f"{side}_foot_index"]
+    foot_pos = [
+        (x, y, vis)
+        for key in foot_keys
+        if key in landmarks
+        for x, y, vis in [landmarks[key]]
+        if vis > vis_threshold
+    ]
+    if foot_pos:
+        foot_x = float(np.mean([p[0] for p in foot_pos]))
+        foot_y = float(np.mean([p[1] for p in foot_pos]))
+        foot_vis = float(np.mean([p[2] for p in foot_pos]))
+        segments.append((foot_x, foot_y))
+        weights.append(0.015)
+        visibilities.append(foot_vis)
+
+
 def compute_center_of_mass(
     landmarks: dict[str, tuple[float, float, float]],
     visibility_threshold: float = 0.5,
@@ -106,114 +200,59 @@ def compute_center_of_mass(
         (x, y, visibility) tuple for estimated CoM position
         visibility = average visibility of all segments used
     """
-    # Define segment representatives and their weights (as fraction of body mass)
-    # Each segment uses midpoint or average of its bounding landmarks
-    segments = []
-    segment_weights = []
-    visibilities = []
+    segments: list = []
+    weights: list = []
+    visibilities: list = []

-    # Head segment: 8% (use nose as proxy)
-    if "nose" in landmarks:
-        x, y, vis = landmarks["nose"]
-        if vis > visibility_threshold:
-            segments.append((x, y))
-            segment_weights.append(0.08)
-            visibilities.append(vis)
+    # Add body segments
+    _add_head_segment(segments, weights, visibilities, landmarks, visibility_threshold)
+    _add_trunk_segment(segments, weights, visibilities, landmarks, visibility_threshold)

-    # Trunk segment: 50% (midpoint between shoulders and hips)
-    trunk_landmarks = ["left_shoulder", "right_shoulder", "left_hip", "right_hip"]
-    trunk_positions = [
-        (x, y, vis)
-        for key in trunk_landmarks
-        if key in landmarks
-        for x, y, vis in [landmarks[key]]
-        if vis > visibility_threshold
-    ]
-    if len(trunk_positions) >= 2:
-        trunk_x = float(np.mean([pos[0] for pos in trunk_positions]))
-        trunk_y = float(np.mean([pos[1] for pos in trunk_positions]))
-        trunk_vis = float(np.mean([pos[2] for pos in trunk_positions]))
-        segments.append((trunk_x, trunk_y))
-        segment_weights.append(0.50)
-        visibilities.append(trunk_vis)
-
-    # Thigh segment: 20% total (midpoint hip to knee for each leg)
+    # Add bilateral limb segments
     for side in ["left", "right"]:
-        hip_key = f"{side}_hip"
-        knee_key = f"{side}_knee"
-        if hip_key in landmarks and knee_key in landmarks:
-            hip_x, hip_y, hip_vis = landmarks[hip_key]
-            knee_x, knee_y, knee_vis = landmarks[knee_key]
-            if hip_vis > visibility_threshold and knee_vis > visibility_threshold:
-                thigh_x = (hip_x + knee_x) / 2
-                thigh_y = (hip_y + knee_y) / 2
-                thigh_vis = (hip_vis + knee_vis) / 2
-                segments.append((thigh_x, thigh_y))
-                segment_weights.append(0.10)  # 10% per leg
-                visibilities.append(thigh_vis)
-
-    # Lower leg segment: 10% total (midpoint knee to ankle for each leg)
-    for side in ["left", "right"]:
-        knee_key = f"{side}_knee"
-        ankle_key = f"{side}_ankle"
-        if knee_key in landmarks and ankle_key in landmarks:
-            knee_x, knee_y, knee_vis = landmarks[knee_key]
-            ankle_x, ankle_y, ankle_vis = landmarks[ankle_key]
-            if knee_vis > visibility_threshold and ankle_vis > visibility_threshold:
-                leg_x = (knee_x + ankle_x) / 2
-                leg_y = (knee_y + ankle_y) / 2
-                leg_vis = (knee_vis + ankle_vis) / 2
-                segments.append((leg_x, leg_y))
-                segment_weights.append(0.05)  # 5% per leg
-                visibilities.append(leg_vis)
-
-    # Foot segment: 3% total (average of ankle, heel, foot_index)
-    for side in ["left", "right"]:
-        foot_keys = [f"{side}_ankle", f"{side}_heel", f"{side}_foot_index"]
-        foot_positions = [
-            (x, y, vis)
-            for key in foot_keys
-            if key in landmarks
-            for x, y, vis in [landmarks[key]]
-            if vis > visibility_threshold
-        ]
-        if foot_positions:
-            foot_x = float(np.mean([pos[0] for pos in foot_positions]))
-            foot_y = float(np.mean([pos[1] for pos in foot_positions]))
-            foot_vis = float(np.mean([pos[2] for pos in foot_positions]))
-            segments.append((foot_x, foot_y))
-            segment_weights.append(0.015)  # 1.5% per foot
-            visibilities.append(foot_vis)
-
-    # If no segments found, fall back to hip average
+        _add_limb_segment(
+            segments,
+            weights,
+            visibilities,
+            landmarks,
+            side,
+            "hip",
+            "knee",
+            0.10,
+            visibility_threshold,
+        )
+        _add_limb_segment(
+            segments,
+            weights,
+            visibilities,
+            landmarks,
+            side,
+            "knee",
+            "ankle",
+            0.05,
+            visibility_threshold,
+        )
+        _add_foot_segment(
+            segments, weights, visibilities, landmarks, side, visibility_threshold
+        )
+
+    # Fallback if no segments found
     if not segments:
         if "left_hip" in landmarks and "right_hip" in landmarks:
             lh_x, lh_y, lh_vis = landmarks["left_hip"]
             rh_x, rh_y, rh_vis = landmarks["right_hip"]
-            return (
-                (lh_x + rh_x) / 2,
-                (lh_y + rh_y) / 2,
-                (lh_vis + rh_vis) / 2,
-            )
-        # Ultimate fallback: center of frame
+            return ((lh_x + rh_x) / 2, (lh_y + rh_y) / 2, (lh_vis + rh_vis) / 2)
         return (0.5, 0.5, 0.0)

-    # Normalize weights to sum to 1.0
-    total_weight = sum(segment_weights)
-    normalized_weights = [w / total_weight for w in segment_weights]
+    # Normalize weights and compute weighted average
+    total_weight = sum(weights)
+    normalized_weights = [w / total_weight for w in weights]

-    # Compute weighted average of segment positions
     com_x = float(
-        sum(
-            pos[0] * weight
-            for pos, weight in zip(segments, normalized_weights, strict=True)
-        )
+        sum(p[0] * w for p, w in zip(segments, normalized_weights, strict=True))
     )
     com_y = float(
-        sum(
-            pos[1] * weight
-            for pos, weight in zip(segments, normalized_weights, strict=True)
-        )
+        sum(p[1] * w for p, w in zip(segments, normalized_weights, strict=True))
     )
     com_visibility = float(np.mean(visibilities)) if visibilities else 0.0

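The refactored compute_center_of_mass keeps the same estimation model as before: each visible segment contributes one proxy point and a body-mass fraction (head 8%, trunk 50%, each thigh 10%, each lower leg 5%, each foot 1.5%), and whichever subset passes the visibility check is renormalized before the weighted average. A minimal standalone sketch of that combination step, with made-up coordinates (illustration only, not the package API):

# Illustration: combine the visible segments the same way the hunk above does,
# renormalizing the weights of whichever segments passed the visibility check.
segments = [(0.52, 0.20), (0.50, 0.45), (0.49, 0.62), (0.51, 0.63)]  # head, trunk, two thighs (hypothetical points)
weights = [0.08, 0.50, 0.10, 0.10]  # body-mass fractions from the code above

total_weight = sum(weights)
normalized = [w / total_weight for w in weights]  # visible segments now sum to 1.0

com_x = sum(p[0] * w for p, w in zip(segments, normalized))
com_y = sum(p[1] * w for p, w in zip(segments, normalized))
print(f"CoM estimate: ({com_x:.3f}, {com_y:.3f})")  # roughly (0.502, 0.469)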
kinemotion/core/smoothing.py CHANGED
@@ -117,7 +117,7 @@ def _store_smoothed_landmarks(
     )


-def _smooth_landmarks_core(
+def _smooth_landmarks_core(  # NOSONAR(S1172) - polyorder used via closure capture in smoother_fn
    landmark_sequence: list[dict[str, tuple[float, float, float]] | None],
    window_length: int,
    polyorder: int,
@@ -129,7 +129,7 @@ def _smooth_landmarks_core(
     Args:
         landmark_sequence: List of landmark dictionaries from each frame
         window_length: Length of filter window (must be odd)
-        polyorder: Order of polynomial used to fit samples
+        polyorder: Order of polynomial used to fit samples (captured by smoother_fn closure)
         smoother_fn: Function that takes (x_coords, y_coords, valid_frames)
             and returns (x_smooth, y_smooth)

kinemotion/dropjump/analysis.py CHANGED
@@ -89,6 +89,87 @@ def calculate_adaptive_threshold(
     return adaptive_threshold


+def _find_stable_baseline(
+    positions: np.ndarray,
+    min_stable_frames: int,
+    stability_threshold: float = 0.01,
+    debug: bool = False,
+) -> tuple[int, float]:
+    """Find first stable period and return baseline position.
+
+    Returns:
+        Tuple of (baseline_start_frame, baseline_position). Returns (-1, 0.0) if not found.
+    """
+    stable_window = min_stable_frames
+
+    for start_idx in range(0, len(positions) - stable_window, 5):
+        window = positions[start_idx : start_idx + stable_window]
+        window_std = float(np.std(window))
+
+        if window_std < stability_threshold:
+            baseline_start = start_idx
+            baseline_position = float(np.median(window))
+
+            if debug:
+                end_frame = baseline_start + stable_window - 1
+                print("[detect_drop_start] Found stable period:")
+                print(f" frames {baseline_start}-{end_frame}")
+                print(f" baseline_position: {baseline_position:.4f}")
+                print(f" baseline_std: {window_std:.4f} < {stability_threshold:.4f}")
+
+            return baseline_start, baseline_position
+
+    if debug:
+        print(
+            f"[detect_drop_start] No stable period found "
+            f"(variance always > {stability_threshold:.4f})"
+        )
+    return -1, 0.0
+
+
+def _find_drop_from_baseline(
+    positions: np.ndarray,
+    baseline_start: int,
+    baseline_position: float,
+    stable_window: int,
+    position_change_threshold: float,
+    smoothing_window: int,
+    debug: bool = False,
+) -> int:
+    """Find drop start after stable baseline period.
+
+    Returns:
+        Drop frame index, or 0 if not found.
+    """
+    search_start = baseline_start + stable_window
+    window_size = max(3, smoothing_window)
+
+    for i in range(search_start, len(positions) - window_size):
+        window_positions = positions[i : i + window_size]
+        avg_position = float(np.mean(window_positions))
+        position_change = avg_position - baseline_position
+
+        if position_change > position_change_threshold:
+            drop_frame = max(baseline_start, i - window_size)
+
+            if debug:
+                print(f"[detect_drop_start] Drop detected at frame {drop_frame}")
+                print(
+                    f" position_change: {position_change:.4f} > "
+                    f"{position_change_threshold:.4f}"
+                )
+                print(
+                    f" avg_position: {avg_position:.4f} vs "
+                    f"baseline: {baseline_position:.4f}"
+                )
+
+            return drop_frame
+
+    if debug:
+        print("[detect_drop_start] No drop detected after stable period")
+    return 0
+
+
 def detect_drop_start(
     positions: np.ndarray,
     fps: float,
@@ -126,84 +207,32 @@ def detect_drop_start(
         - Returns: 119
     """
     min_stable_frames = int(fps * min_stationary_duration)
-    if len(positions) < min_stable_frames + 30:  # Need some frames after stable period
+    if len(positions) < min_stable_frames + 30:
         if debug:
-            min_frames_needed = min_stable_frames + 30
             print(
-                f"[detect_drop_start] Video too short: {len(positions)} < {min_frames_needed}"
+                f"[detect_drop_start] Video too short: {len(positions)} < "
+                f"{min_stable_frames + 30}"
             )
         return 0

-    # STEP 1: Find first stable period by scanning forward
-    # Look for window with low variance (< 1% of frame height)
-    stability_threshold = 0.01  # 1% of frame height
-    stable_window = min_stable_frames
-
-    baseline_start = -1
-    baseline_position = 0.0
-
-    # Scan from start, looking for stable window
-    for start_idx in range(0, len(positions) - stable_window, 5):  # Step by 5 frames
-        window = positions[start_idx : start_idx + stable_window]
-        window_std = float(np.std(window))
-
-        if window_std < stability_threshold:
-            # Found stable period!
-            baseline_start = start_idx
-            baseline_position = float(np.median(window))
-
-            if debug:
-                end_frame = baseline_start + stable_window - 1
-                print("[detect_drop_start] Found stable period:")
-                print(f" frames {baseline_start}-{end_frame}")
-                print(f" baseline_position: {baseline_position:.4f}")
-                print(f" baseline_std: {window_std:.4f} < {stability_threshold:.4f}")
-            break
+    # Find stable baseline period
+    baseline_start, baseline_position = _find_stable_baseline(
+        positions, min_stable_frames, debug=debug
+    )

     if baseline_start < 0:
-        if debug:
-            msg = (
-                f"No stable period found (variance always > {stability_threshold:.4f})"
-            )
-            print(f"[detect_drop_start] {msg}")
         return 0

-    # STEP 2: Find when position changes significantly from baseline
-    # Start searching after stable period ends
-    search_start = baseline_start + stable_window
-    window_size = max(3, smoothing_window)
-
-    for i in range(search_start, len(positions) - window_size):
-        # Average position over small window to reduce noise
-        window_positions = positions[i : i + window_size]
-        avg_position = float(np.mean(window_positions))
-
-        # Check if position has increased (dropped) significantly
-        position_change = avg_position - baseline_position
-
-        if position_change > position_change_threshold:
-            # Found start of drop - back up slightly to catch beginning
-            drop_frame_candidate = i - window_size
-            if drop_frame_candidate < baseline_start:
-                drop_frame = baseline_start
-            else:
-                drop_frame = drop_frame_candidate
-
-            if debug:
-                print(f"[detect_drop_start] Drop detected at frame {drop_frame}")
-                print(
-                    f" position_change: {position_change:.4f} > {position_change_threshold:.4f}"
-                )
-                print(
-                    f" avg_position: {avg_position:.4f} vs baseline: {baseline_position:.4f}"
-                )
-
-            return drop_frame
-
-    # No significant position change detected
-    if debug:
-        print("[detect_drop_start] No drop detected after stable period")
-    return 0
+    # Find drop from baseline
+    return _find_drop_from_baseline(
+        positions,
+        baseline_start,
+        baseline_position,
+        min_stable_frames,
+        position_change_threshold,
+        smoothing_window,
+        debug,
+    )


 def detect_ground_contact(
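detect_drop_start now delegates to the two helpers above, but the overall strategy is unchanged: scan for a window whose standard deviation is below roughly 1% of frame height to establish a baseline, then flag the first short window whose mean position has drifted past the position-change threshold. A rough standalone illustration of that two-stage idea on a synthetic trace (the window sizes and thresholds here are illustrative, not the library defaults):

# Sketch only: two-stage drop detection on a synthetic normalized-position trace.
import numpy as np

positions = np.concatenate([np.full(60, 0.30), np.linspace(0.30, 0.55, 40)])  # 60 still frames, then a drop
stable_window, step = 30, 5
stability_threshold, change_threshold = 0.01, 0.05

baseline = None
for start in range(0, len(positions) - stable_window, step):
    window = positions[start : start + stable_window]
    if float(np.std(window)) < stability_threshold:  # stage 1: low-variance baseline window
        baseline = (start, float(np.median(window)))
        break

if baseline is not None:
    base_start, base_pos = baseline
    for i in range(base_start + stable_window, len(positions) - 3):
        if float(np.mean(positions[i : i + 3])) - base_pos > change_threshold:  # stage 2: departure from baseline
            print("drop starts near frame", max(base_start, i - 3))
            break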
@@ -349,6 +378,71 @@ def interpolate_threshold_crossing(
     return float(max(0.0, min(1.0, t)))


+def _interpolate_phase_start(
+    start_idx: int,
+    state: ContactState,
+    velocities: np.ndarray,
+    velocity_threshold: float,
+) -> float:
+    """Interpolate start boundary of a phase with sub-frame precision.
+
+    Returns:
+        Fractional start frame, or float(start_idx) if no interpolation.
+    """
+    if start_idx <= 0 or start_idx >= len(velocities):
+        return float(start_idx)
+
+    vel_before = velocities[start_idx - 1]
+    vel_at = velocities[start_idx]
+
+    # Check threshold crossing based on state
+    is_landing = (
+        state == ContactState.ON_GROUND and vel_before > velocity_threshold > vel_at
+    )
+    is_takeoff = (
+        state == ContactState.IN_AIR and vel_before < velocity_threshold < vel_at
+    )
+
+    if is_landing or is_takeoff:
+        offset = interpolate_threshold_crossing(vel_before, vel_at, velocity_threshold)
+        return (start_idx - 1) + offset
+
+    return float(start_idx)
+
+
+def _interpolate_phase_end(
+    end_idx: int,
+    state: ContactState,
+    velocities: np.ndarray,
+    velocity_threshold: float,
+    max_idx: int,
+) -> float:
+    """Interpolate end boundary of a phase with sub-frame precision.
+
+    Returns:
+        Fractional end frame, or float(end_idx) if no interpolation.
+    """
+    if end_idx >= max_idx - 1 or end_idx + 1 >= len(velocities):
+        return float(end_idx)
+
+    vel_at = velocities[end_idx]
+    vel_after = velocities[end_idx + 1]
+
+    # Check threshold crossing based on state
+    is_takeoff = (
+        state == ContactState.ON_GROUND and vel_at < velocity_threshold < vel_after
+    )
+    is_landing = (
+        state == ContactState.IN_AIR and vel_at > velocity_threshold > vel_after
+    )
+
+    if is_takeoff or is_landing:
+        offset = interpolate_threshold_crossing(vel_at, vel_after, velocity_threshold)
+        return end_idx + offset
+
+    return float(end_idx)
+
+
 def find_interpolated_phase_transitions(
     foot_positions: np.ndarray,
     contact_states: list[ContactState],
@@ -371,13 +465,10 @@ def find_interpolated_phase_transitions(
     Returns:
         List of (start_frame, end_frame, state) tuples with fractional frame indices
     """
-    # First get integer frame phases
     phases = find_contact_phases(contact_states)
     if not phases or len(foot_positions) < 2:
         return []

-    # Compute velocities from derivative of smoothed trajectory
-    # This gives much smoother velocity estimates than simple frame differences
     velocities = compute_velocity_from_derivative(
         foot_positions, window_length=smoothing_window, polyorder=2
     )
@@ -385,57 +476,12 @@ def find_interpolated_phase_transitions(
     interpolated_phases: list[tuple[float, float, ContactState]] = []

     for start_idx, end_idx, state in phases:
-        start_frac = float(start_idx)
-        end_frac = float(end_idx)
-
-        # Interpolate start boundary (transition INTO this phase)
-        if start_idx > 0 and start_idx < len(velocities):
-            vel_before = (
-                velocities[start_idx - 1] if start_idx > 0 else velocities[start_idx]
-            )
-            vel_at = velocities[start_idx]
-
-            # Check if we're crossing the threshold at this boundary
-            if state == ContactState.ON_GROUND:
-                # Transition air→ground: velocity dropping below threshold
-                if vel_before > velocity_threshold > vel_at:
-                    # Interpolate between start_idx-1 and start_idx
-                    offset = interpolate_threshold_crossing(
-                        vel_before, vel_at, velocity_threshold
-                    )
-                    start_frac = (start_idx - 1) + offset
-            elif state == ContactState.IN_AIR:
-                # Transition ground→air: velocity rising above threshold
-                if vel_before < velocity_threshold < vel_at:
-                    # Interpolate between start_idx-1 and start_idx
-                    offset = interpolate_threshold_crossing(
-                        vel_before, vel_at, velocity_threshold
-                    )
-                    start_frac = (start_idx - 1) + offset
-
-        # Interpolate end boundary (transition OUT OF this phase)
-        if end_idx < len(foot_positions) - 1 and end_idx + 1 < len(velocities):
-            vel_at = velocities[end_idx]
-            vel_after = velocities[end_idx + 1]
-
-            # Check if we're crossing the threshold at this boundary
-            if state == ContactState.ON_GROUND:
-                # Transition ground→air: velocity rising above threshold
-                if vel_at < velocity_threshold < vel_after:
-                    # Interpolate between end_idx and end_idx+1
-                    offset = interpolate_threshold_crossing(
-                        vel_at, vel_after, velocity_threshold
-                    )
-                    end_frac = end_idx + offset
-            elif state == ContactState.IN_AIR:
-                # Transition air→ground: velocity dropping below threshold
-                if vel_at > velocity_threshold > vel_after:
-                    # Interpolate between end_idx and end_idx+1
-                    offset = interpolate_threshold_crossing(
-                        vel_at, vel_after, velocity_threshold
-                    )
-                    end_frac = end_idx + offset
-
+        start_frac = _interpolate_phase_start(
+            start_idx, state, velocities, velocity_threshold
+        )
+        end_frac = _interpolate_phase_end(
+            end_idx, state, velocities, velocity_threshold, len(foot_positions)
+        )
         interpolated_phases.append((start_frac, end_frac, state))

     return interpolated_phases
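The fractional boundaries above come from interpolate_threshold_crossing, which (per the earlier hunk) returns a crossing fraction clamped to [0, 1]. A plausible minimal version of that linear interpolation, shown here only to make the fractional-frame arithmetic concrete, not as the library's exact implementation:

def crossing_fraction(v0: float, v1: float, threshold: float) -> float:
    """Fraction of the way from frame i to i+1 where the velocity trace meets threshold."""
    if v1 == v0:  # flat segment: no unique crossing point
        return 0.0
    t = (threshold - v0) / (v1 - v0)  # linear interpolation between the two samples
    return max(0.0, min(1.0, t))  # clamp to [0, 1], as the real helper does


# velocity rising 0.01 -> 0.05 with threshold 0.02 crosses 25% into the frame gap,
# so a takeoff at end_idx would be reported as end_idx + 0.25
assert abs(crossing_fraction(0.01, 0.05, 0.02) - 0.25) < 1e-9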
kinemotion/dropjump/cli.py CHANGED
@@ -413,13 +413,11 @@ def _process_single(
         contact_states,
         vertical_positions,
         video.fps,
-        drop_height_m=None,
         drop_start_frame=expert_params.drop_start_frame,
         velocity_threshold=params.velocity_threshold,
         smoothing_window=params.smoothing_window,
         polyorder=params.polyorder,
         use_curvature=params.use_curvature,
-        kinematic_correction_factor=1.0,
     )

     # Output metrics
kinemotion/dropjump/debug_overlay.py CHANGED
@@ -12,6 +12,109 @@ from .kinematics import DropJumpMetrics
 class DebugOverlayRenderer(BaseDebugOverlayRenderer):
     """Renders debug information on video frames."""

+    def _draw_com_visualization(
+        self,
+        frame: np.ndarray,
+        landmarks: dict[str, tuple[float, float, float]],
+        contact_state: ContactState,
+    ) -> None:
+        """Draw center of mass visualization on frame."""
+        com_x, com_y, _ = compute_center_of_mass(landmarks)
+        px = int(com_x * self.width)
+        py = int(com_y * self.height)
+
+        color = (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
+        cv2.circle(frame, (px, py), 15, color, -1)
+        cv2.circle(frame, (px, py), 17, (255, 255, 255), 2)
+
+        # Draw hip midpoint reference
+        if "left_hip" in landmarks and "right_hip" in landmarks:
+            lh_x, lh_y, _ = landmarks["left_hip"]
+            rh_x, rh_y, _ = landmarks["right_hip"]
+            hip_x = int((lh_x + rh_x) / 2 * self.width)
+            hip_y = int((lh_y + rh_y) / 2 * self.height)
+            cv2.circle(frame, (hip_x, hip_y), 8, (255, 165, 0), -1)
+            cv2.line(frame, (hip_x, hip_y), (px, py), (255, 165, 0), 2)
+
+    def _draw_foot_visualization(
+        self,
+        frame: np.ndarray,
+        landmarks: dict[str, tuple[float, float, float]],
+        contact_state: ContactState,
+    ) -> None:
+        """Draw foot position visualization on frame."""
+        foot_x, foot_y = compute_average_foot_position(landmarks)
+        px = int(foot_x * self.width)
+        py = int(foot_y * self.height)
+
+        color = (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
+        cv2.circle(frame, (px, py), 10, color, -1)
+
+        # Draw individual foot landmarks
+        foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
+        for key in foot_keys:
+            if key in landmarks:
+                x, y, vis = landmarks[key]
+                if vis > 0.5:
+                    lx = int(x * self.width)
+                    ly = int(y * self.height)
+                    cv2.circle(frame, (lx, ly), 5, (255, 255, 0), -1)
+
+    def _draw_phase_labels(
+        self,
+        frame: np.ndarray,
+        frame_idx: int,
+        metrics: DropJumpMetrics,
+    ) -> None:
+        """Draw phase labels (ground contact, flight, peak) on frame."""
+        y_offset = 110
+
+        # Ground contact phase
+        if (
+            metrics.contact_start_frame
+            and metrics.contact_end_frame
+            and metrics.contact_start_frame <= frame_idx <= metrics.contact_end_frame
+        ):
+            cv2.putText(
+                frame,
+                "GROUND CONTACT",
+                (10, y_offset),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                (0, 255, 0),
+                2,
+            )
+            y_offset += 40
+
+        # Flight phase
+        if (
+            metrics.flight_start_frame
+            and metrics.flight_end_frame
+            and metrics.flight_start_frame <= frame_idx <= metrics.flight_end_frame
+        ):
+            cv2.putText(
+                frame,
+                "FLIGHT PHASE",
+                (10, y_offset),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                (0, 0, 255),
+                2,
+            )
+            y_offset += 40
+
+        # Peak height
+        if metrics.peak_height_frame == frame_idx:
+            cv2.putText(
+                frame,
+                "PEAK HEIGHT",
+                (10, y_offset),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                (255, 0, 255),
+                2,
+            )
+
     def render_frame(
         self,
         frame: np.ndarray,
@@ -37,67 +140,20 @@ class DebugOverlayRenderer(BaseDebugOverlayRenderer):
     """
         annotated = frame.copy()

-        # Draw landmarks if available
+        # Draw landmarks
         if landmarks:
             if use_com:
-                # Draw center of mass position
-                com_x, com_y, _ = compute_center_of_mass(landmarks)  # com_vis not used
-                px = int(com_x * self.width)
-                py = int(com_y * self.height)
-
-                # Draw CoM with larger circle
-                color = (
-                    (0, 255, 0)
-                    if contact_state == ContactState.ON_GROUND
-                    else (0, 0, 255)
-                )
-                cv2.circle(annotated, (px, py), 15, color, -1)
-                cv2.circle(annotated, (px, py), 17, (255, 255, 255), 2)  # White border
-
-                # Draw body segments for reference
-                # Draw hip midpoint
-                if "left_hip" in landmarks and "right_hip" in landmarks:
-                    lh_x, lh_y, _ = landmarks["left_hip"]
-                    rh_x, rh_y, _ = landmarks["right_hip"]
-                    hip_x = int((lh_x + rh_x) / 2 * self.width)
-                    hip_y = int((lh_y + rh_y) / 2 * self.height)
-                    cv2.circle(
-                        annotated, (hip_x, hip_y), 8, (255, 165, 0), -1
-                    )  # Orange
-                    # Draw line from hip to CoM
-                    cv2.line(annotated, (hip_x, hip_y), (px, py), (255, 165, 0), 2)
+                self._draw_com_visualization(annotated, landmarks, contact_state)
             else:
-                # Draw foot position (original method)
-                foot_x, foot_y = compute_average_foot_position(landmarks)
-                px = int(foot_x * self.width)
-                py = int(foot_y * self.height)
-
-                # Draw foot position circle
-                color = (
-                    (0, 255, 0)
-                    if contact_state == ContactState.ON_GROUND
-                    else (0, 0, 255)
-                )
-                cv2.circle(annotated, (px, py), 10, color, -1)
-
-                # Draw individual foot landmarks
-                foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
-                for key in foot_keys:
-                    if key in landmarks:
-                        x, y, vis = landmarks[key]
-                        if vis > 0.5:
-                            lx = int(x * self.width)
-                            ly = int(y * self.height)
-                            cv2.circle(annotated, (lx, ly), 5, (255, 255, 0), -1)
+                self._draw_foot_visualization(annotated, landmarks, contact_state)

         # Draw contact state
-        state_text = f"State: {contact_state.value}"
         state_color = (
             (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
         )
         cv2.putText(
             annotated,
-            state_text,
+            f"State: {contact_state.value}",
             (10, 30),
             cv2.FONT_HERSHEY_SIMPLEX,
             1,
@@ -116,52 +172,8 @@ class DebugOverlayRenderer(BaseDebugOverlayRenderer):
             2,
         )

-        # Draw metrics if in relevant phase
+        # Draw phase labels
         if metrics:
-            y_offset = 110
-            if (
-                metrics.contact_start_frame
-                and metrics.contact_end_frame
-                and metrics.contact_start_frame
-                <= frame_idx
-                <= metrics.contact_end_frame
-            ):
-                cv2.putText(
-                    annotated,
-                    "GROUND CONTACT",
-                    (10, y_offset),
-                    cv2.FONT_HERSHEY_SIMPLEX,
-                    0.7,
-                    (0, 255, 0),
-                    2,
-                )
-                y_offset += 40
-
-            if (
-                metrics.flight_start_frame
-                and metrics.flight_end_frame
-                and metrics.flight_start_frame <= frame_idx <= metrics.flight_end_frame
-            ):
-                cv2.putText(
-                    annotated,
-                    "FLIGHT PHASE",
-                    (10, y_offset),
-                    cv2.FONT_HERSHEY_SIMPLEX,
-                    0.7,
-                    (0, 0, 255),
-                    2,
-                )
-                y_offset += 40
-
-            if metrics.peak_height_frame == frame_idx:
-                cv2.putText(
-                    annotated,
-                    "PEAK HEIGHT",
-                    (10, y_offset),
-                    cv2.FONT_HERSHEY_SIMPLEX,
-                    0.7,
-                    (255, 0, 255),
-                    2,
-                )
+            self._draw_phase_labels(annotated, frame_idx, metrics)

         return annotated
kinemotion/dropjump/kinematics.py CHANGED
@@ -343,29 +343,26 @@ def calculate_drop_jump_metrics(
     contact_states: list[ContactState],
     foot_y_positions: np.ndarray,
     fps: float,
-    drop_height_m: float | None = None,
     drop_start_frame: int | None = None,
     velocity_threshold: float = 0.02,
     smoothing_window: int = 5,
     polyorder: int = 2,
     use_curvature: bool = True,
-    kinematic_correction_factor: float = 1.0,
 ) -> DropJumpMetrics:
     """
     Calculate drop-jump metrics from contact states and positions.

+    Jump height is calculated from flight time using kinematic formula: h = g × t² / 8
+
     Args:
         contact_states: Contact state for each frame
         foot_y_positions: Vertical positions of feet (normalized 0-1)
         fps: Video frame rate
-        drop_height_m: Known drop box/platform height in meters for calibration (optional)
+        drop_start_frame: Optional manual drop start frame
         velocity_threshold: Velocity threshold used for contact detection (for interpolation)
         smoothing_window: Window size for velocity/acceleration smoothing (must be odd)
         polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
         use_curvature: Whether to use curvature analysis for refining transitions
-        kinematic_correction_factor: Correction factor for kinematic jump height calculation
-            (default: 1.0 = no correction). Historical testing suggested 1.35, but this is
-            unvalidated. Use calibrated measurement (--drop-height) for validated results.

     Returns:
         DropJumpMetrics object with calculated values
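With drop_height_m and kinematic_correction_factor removed, jump height now comes solely from the flight-time formula named in the docstring above, h = g × t² / 8, which assumes takeoff and landing occur at the same level. A quick numerical check with made-up numbers (the helper name and frame values here are illustrative, not part of the package):

G = 9.81  # gravitational acceleration, m/s^2

def height_from_flight_time(flight_time_s: float) -> float:
    """h = g * t^2 / 8, assuming takeoff and landing at the same height."""
    return G * flight_time_s**2 / 8

flight_frames, fps = 15.3, 30.0      # hypothetical fractional frame count from the interpolated phases
flight_time = flight_frames / fps    # 0.51 s
print(f"{height_from_flight_time(flight_time):.3f} m")  # ~0.319 m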
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.12.1
+Version: 0.12.2
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -1,5 +1,5 @@
 kinemotion/__init__.py,sha256=REBC9wrwYC_grvCS00qEOyign65Zc1sc-5buLpyqQxA,654
-kinemotion/api.py,sha256=-m4KnlpXWswNyHc_keuNtKZKy8EONSUqrSTNtvoq_OA,31193
+kinemotion/api.py,sha256=jcUVn8UHysj8GNzdNShf3y2pp20G4iTBb1jvkpptUvU,31116
 kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
 kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
 kinemotion/cmj/analysis.py,sha256=4HYGn4VDIB6oExAees-VcPfpNgWOltpgwjyNTU7YAb4,18263
@@ -12,17 +12,17 @@ kinemotion/core/auto_tuning.py,sha256=cvmxUI-CbahpOJQtR2r5jOx4Q6yKPe3DO1o15hOQId
 kinemotion/core/cli_utils.py,sha256=Pq1JF7yvK1YbH0tOUWKjplthCbWsJQt4Lv7esPYH4FM,7254
 kinemotion/core/debug_overlay_utils.py,sha256=TyUb5okv5qw8oeaX3jsUO_kpwf1NnaHEAOTm-8LwTno,4587
 kinemotion/core/filtering.py,sha256=f-m-aA59e4WqE6u-9MA51wssu7rI-Y_7n1cG8IWdeRQ,11241
-kinemotion/core/pose.py,sha256=Wfd1RR-2ZznYpWeQUbySwcV3mvReqn8n3XO6S7pGq4M,8390
-kinemotion/core/smoothing.py,sha256=Zdhqw4NyCrZaEb-Jo3sASzP-QlEL5sVTgHoXU8zT_xU,14136
+kinemotion/core/pose.py,sha256=ztemdZ_ysVVK3gbXabm8qS_dr1VfJX9KZjmcO-Z-iNE,8532
+kinemotion/core/smoothing.py,sha256=C9GK3PAN16RpqJw2UWeVslSTJZEvALeVADjtnJnSF88,14240
 kinemotion/core/video_io.py,sha256=UtmUndw22uFnZBK_BmeE912yRYH1YnU_P8LjuN33DPc,6461
 kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
-kinemotion/dropjump/analysis.py,sha256=crO0SUq8TiMHdK5hPuHHuOFrEGhGdPoeb5rXQUvqCog,26103
-kinemotion/dropjump/cli.py,sha256=emnMlg2Td4iS7go9ckTFnolPEytX9MKoPRhfjBwyArU,21731
-kinemotion/dropjump/debug_overlay.py,sha256=2L4VAZwWFnaOQ7LAF3ALXCjEaVNzkfpLT5-h0qKL_6g,5707
-kinemotion/dropjump/kinematics.py,sha256=WC1HuIKx3CfEg9m9jFME74IAHSJRcbqo2_yysZIBqJw,15880
+kinemotion/dropjump/analysis.py,sha256=PoBzlqciBFB_O7ejdjBhpnk19a_VoD31tDjXuN1-ovo,25764
+kinemotion/dropjump/cli.py,sha256=90GddzgMLwEKKwcG0VW94HeXFwEK5zSJm6w6UkPbaRk,21646
+kinemotion/dropjump/debug_overlay.py,sha256=LkPw6ucb7beoYWS4L-Lvjs1KLCm5wAWDAfiznUeV2IQ,5668
+kinemotion/dropjump/kinematics.py,sha256=txDxpDti3VJVctWGbe3aIrlIx83UY8-ynzlX01TOvTA,15577
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.12.1.dist-info/METADATA,sha256=50i4pFgm8p1uND7dmF4WhjQu5FL0p60qeARx29b39QY,18990
-kinemotion-0.12.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-kinemotion-0.12.1.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.12.1.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.12.1.dist-info/RECORD,,
+kinemotion-0.12.2.dist-info/METADATA,sha256=FE1-EfYL73UDQE9xBj-qRCQ51PN6F1SylDmVJrt974s,18990
+kinemotion-0.12.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+kinemotion-0.12.2.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.12.2.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.12.2.dist-info/RECORD,,