kinemotion 0.25.0__py3-none-any.whl → 0.26.1__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as published in that registry.
kinemotion/api.py CHANGED
@@ -244,6 +244,20 @@ def _apply_smoothing(
     )
 
 
+def _calculate_foot_visibility(frame_landmarks: dict) -> float:
+    """Calculate average visibility of foot landmarks.
+
+    Args:
+        frame_landmarks: Dictionary of landmarks for a frame
+
+    Returns:
+        Average visibility value (0-1)
+    """
+    foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
+    foot_vis = [frame_landmarks[key][2] for key in foot_keys if key in frame_landmarks]
+    return float(np.mean(foot_vis)) if foot_vis else 0.0
+
+
 def _extract_vertical_positions(
     smoothed_landmarks: list,
 ) -> tuple[np.ndarray, np.ndarray]:
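The new _calculate_foot_visibility helper centralizes the foot-visibility averaging that was previously inlined in _extract_vertical_positions (next hunk). A minimal usage sketch, assuming each landmark maps to an (x, y, visibility) tuple as the frame_landmarks[key][2] indexing implies; the standalone copy below is for illustration only, not the packaged module:

    import numpy as np

    def calculate_foot_visibility(frame_landmarks: dict) -> float:
        # Mirrors the packaged helper: average the visibility channel of whichever
        # foot landmarks are present; missing keys are skipped entirely.
        foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
        foot_vis = [frame_landmarks[k][2] for k in foot_keys if k in frame_landmarks]
        return float(np.mean(foot_vis)) if foot_vis else 0.0

    # Toy frame: the right heel is missing, so only three landmarks are averaged.
    frame = {
        "left_ankle": (0.42, 0.91, 0.98),
        "right_ankle": (0.58, 0.90, 0.95),
        "left_heel": (0.40, 0.95, 0.60),
    }
    print(calculate_foot_visibility(frame))  # (0.98 + 0.95 + 0.60) / 3 ≈ 0.843
    print(calculate_foot_visibility({}))     # 0.0 when no foot landmarks are tracked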
@@ -262,13 +276,7 @@ def _extract_vertical_positions(
         if frame_landmarks:
             _, foot_y = compute_average_foot_position(frame_landmarks)
             position_list.append(foot_y)
-
-            # Average visibility of foot landmarks
-            foot_vis = []
-            for key in ["left_ankle", "right_ankle", "left_heel", "right_heel"]:
-                if key in frame_landmarks:
-                    foot_vis.append(frame_landmarks[key][2])
-            visibilities_list.append(float(np.mean(foot_vis)) if foot_vis else 0.0)
+            visibilities_list.append(_calculate_foot_visibility(frame_landmarks))
         else:
             position_list.append(position_list[-1] if position_list else 0.5)
             visibilities_list.append(0.0)
@@ -235,6 +235,86 @@ def detect_drop_start(
     )
 
 
+def _filter_stationary_with_visibility(
+    is_stationary: np.ndarray,
+    visibilities: np.ndarray | None,
+    visibility_threshold: float,
+) -> np.ndarray:
+    """Apply visibility filter to stationary flags.
+
+    Args:
+        is_stationary: Boolean array indicating stationary frames
+        visibilities: Optional visibility scores for each frame
+        visibility_threshold: Minimum visibility to trust landmark
+
+    Returns:
+        Filtered is_stationary array
+    """
+    if visibilities is not None:
+        is_visible = visibilities > visibility_threshold
+        return is_stationary & is_visible
+    return is_stationary
+
+
+def _find_contact_frames(
+    is_stationary: np.ndarray,
+    min_contact_frames: int,
+) -> set[int]:
+    """Find frames with sustained contact using minimum duration filter.
+
+    Args:
+        is_stationary: Boolean array indicating stationary frames
+        min_contact_frames: Minimum consecutive frames to confirm contact
+
+    Returns:
+        Set of frame indices that meet minimum contact duration
+    """
+    contact_frames: set[int] = set()
+    current_run = []
+
+    for i, stationary in enumerate(is_stationary):
+        if stationary:
+            current_run.append(i)
+        else:
+            if len(current_run) >= min_contact_frames:
+                contact_frames.update(current_run)
+            current_run = []
+
+    # Handle last run
+    if len(current_run) >= min_contact_frames:
+        contact_frames.update(current_run)
+
+    return contact_frames
+
+
+def _assign_contact_states(
+    n_frames: int,
+    contact_frames: set[int],
+    visibilities: np.ndarray | None,
+    visibility_threshold: float,
+) -> list[ContactState]:
+    """Assign contact states based on contact frames and visibility.
+
+    Args:
+        n_frames: Total number of frames
+        contact_frames: Set of frames with confirmed contact
+        visibilities: Optional visibility scores for each frame
+        visibility_threshold: Minimum visibility to trust landmark
+
+    Returns:
+        List of ContactState for each frame
+    """
+    states = []
+    for i in range(n_frames):
+        if visibilities is not None and visibilities[i] < visibility_threshold:
+            states.append(ContactState.UNKNOWN)
+        elif i in contact_frames:
+            states.append(ContactState.ON_GROUND)
+        else:
+            states.append(ContactState.IN_AIR)
+    return states
+
+
 def detect_ground_contact(
     foot_positions: np.ndarray,
     velocity_threshold: float = 0.02,
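Of the three helpers added here, _find_contact_frames carries the core logic: it keeps only runs of stationary frames that last at least min_contact_frames, which suppresses single-frame jitter. A small worked example, with the run-length logic re-declared locally so the snippet runs on its own:

    import numpy as np

    def find_contact_frames(is_stationary: np.ndarray, min_contact_frames: int) -> set[int]:
        # Accumulate indices of consecutive stationary frames; keep a run only if
        # it is long enough to count as a real ground contact.
        contact_frames: set[int] = set()
        current_run: list[int] = []
        for i, stationary in enumerate(is_stationary):
            if stationary:
                current_run.append(i)
            else:
                if len(current_run) >= min_contact_frames:
                    contact_frames.update(current_run)
                current_run = []
        if len(current_run) >= min_contact_frames:  # trailing run at end of clip
            contact_frames.update(current_run)
        return contact_frames

    # Frames 1-2 are a two-frame blip; frames 5-8 are a sustained four-frame contact.
    flags = np.array([False, True, True, False, False, True, True, True, True, False])
    print(sorted(find_contact_frames(flags, min_contact_frames=3)))  # [5, 6, 7, 8]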
@@ -247,7 +327,7 @@ def detect_ground_contact(
     """
     Detect when feet are in contact with ground based on vertical motion.
 
-    Uses derivative-based velocity calculation via Savitzky-Golay filter for smooth,
+    Uses derivative-based velocity calculation via Savitzky-Golay filter for smooth,
     accurate velocity estimates. This is consistent with the velocity calculation used
     throughout the pipeline for sub-frame interpolation and curvature analysis.
 
@@ -264,52 +344,30 @@ def detect_ground_contact(
         List of ContactState for each frame
     """
     n_frames = len(foot_positions)
-    states = [ContactState.UNKNOWN] * n_frames
 
     if n_frames < 2:
-        return states
+        return [ContactState.UNKNOWN] * n_frames
 
     # Compute vertical velocity using derivative-based method
-    # This provides smoother, more accurate velocity estimates than frame-to-frame differences
-    # and is consistent with the velocity calculation used for sub-frame interpolation
     velocities = compute_velocity_from_derivative(
         foot_positions, window_length=window_length, polyorder=polyorder
     )
 
-    # Detect potential contact frames based on low velocity
+    # Detect stationary frames based on velocity threshold
    is_stationary = np.abs(velocities) < velocity_threshold
 
     # Apply visibility filter
-    if visibilities is not None:
-        is_visible = visibilities > visibility_threshold
-        is_stationary = is_stationary & is_visible
-
-    # Apply minimum contact duration filter
-    contact_frames = []
-    current_run = []
-
-    for i, stationary in enumerate(is_stationary):
-        if stationary:
-            current_run.append(i)
-        else:
-            if len(current_run) >= min_contact_frames:
-                contact_frames.extend(current_run)
-            current_run = []
-
-    # Don't forget the last run
-    if len(current_run) >= min_contact_frames:
-        contact_frames.extend(current_run)
+    is_stationary = _filter_stationary_with_visibility(
+        is_stationary, visibilities, visibility_threshold
+    )
 
-    # Set states
-    for i in range(n_frames):
-        if visibilities is not None and visibilities[i] < visibility_threshold:
-            states[i] = ContactState.UNKNOWN
-        elif i in contact_frames:
-            states[i] = ContactState.ON_GROUND
-        else:
-            states[i] = ContactState.IN_AIR
+    # Find frames with sustained contact
+    contact_frames = _find_contact_frames(is_stationary, min_contact_frames)
 
-    return states
+    # Assign states
+    return _assign_contact_states(
+        n_frames, contact_frames, visibilities, visibility_threshold
+    )
 
 
 def find_contact_phases(
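detect_ground_contact now delegates to the three helpers above, but the velocity step is unchanged: compute_velocity_from_derivative smooths the foot trajectory and differentiates it, and frames whose speed stays below velocity_threshold are treated as stationary. A sketch of that idea using scipy.signal.savgol_filter directly; treating the wrapper as a plain Savitzky-Golay derivative is an assumption based on the window_length and polyorder parameters, not the package's actual implementation:

    import numpy as np
    from scipy.signal import savgol_filter

    # Synthetic normalized foot height: flat ground contact, then a fast rise
    # (y decreases as the foot moves up in image coordinates), then near-apex.
    foot_y = np.concatenate([
        np.full(15, 0.90),
        np.linspace(0.90, 0.70, 10),
        np.full(10, 0.70),
    ])

    # First derivative of a Savitzky-Golay fit gives a smoothed per-frame velocity.
    velocity = savgol_filter(foot_y, window_length=5, polyorder=2, deriv=1)

    velocity_threshold = 0.02
    is_stationary = np.abs(velocity) < velocity_threshold
    print(bool(is_stationary[:15].all()))    # True: the flat segment reads as stationary
    print(bool(is_stationary[18:22].any()))  # False: the moving segment exceeds the threshold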
@@ -19,6 +19,38 @@ if TYPE_CHECKING:
     from ..core.quality import QualityAssessment
 
 
+def _format_float_metric(
+    value: float | None, multiplier: float = 1, decimals: int = 2
+) -> float | None:
+    """Format a float metric value with optional scaling and rounding.
+
+    Args:
+        value: The value to format, or None
+        multiplier: Factor to multiply value by (default: 1)
+        decimals: Number of decimal places to round to (default: 2)
+
+    Returns:
+        Formatted value rounded to specified decimals, or None if input is None
+    """
+    if value is None:
+        return None
+    return round(value * multiplier, decimals)
+
+
+def _format_int_metric(value: float | int | None) -> int | None:
+    """Format a value as an integer.
+
+    Args:
+        value: The value to format, or None
+
+    Returns:
+        Value converted to int, or None if input is None
+    """
+    if value is None:
+        return None
+    return int(value)
+
+
 class DropJumpDataDict(TypedDict, total=False):
     """Type-safe dictionary for drop jump measurement data."""
 
@@ -69,94 +101,65 @@ class DropJumpMetrics:
         # Complete metadata
         self.result_metadata: ResultMetadata | None = None
 
-    def to_dict(self) -> DropJumpResultDict:
-        """Convert metrics to JSON-serializable dictionary with data/metadata structure.
+    def _build_data_dict(self) -> DropJumpDataDict:
+        """Build the data portion of the result dictionary.
 
         Returns:
-            Dictionary with nested data and metadata structure.
+            Dictionary containing formatted metric values.
         """
-        data: DropJumpDataDict = {
-            "ground_contact_time_ms": (
-                round(self.ground_contact_time * 1000, 2)
-                if self.ground_contact_time is not None
-                else None
-            ),
-            "flight_time_ms": (
-                round(self.flight_time * 1000, 2)
-                if self.flight_time is not None
-                else None
+        return {
+            "ground_contact_time_ms": _format_float_metric(
+                self.ground_contact_time, 1000, 2
             ),
-            "jump_height_m": (
-                round(self.jump_height, 3) if self.jump_height is not None else None
+            "flight_time_ms": _format_float_metric(self.flight_time, 1000, 2),
+            "jump_height_m": _format_float_metric(self.jump_height, 1, 3),
+            "jump_height_kinematic_m": _format_float_metric(
+                self.jump_height_kinematic, 1, 3
             ),
-            "jump_height_kinematic_m": (
-                round(self.jump_height_kinematic, 3)
-                if self.jump_height_kinematic is not None
-                else None
+            "jump_height_trajectory_normalized": _format_float_metric(
+                self.jump_height_trajectory, 1, 4
             ),
-            "jump_height_trajectory_normalized": (
-                round(self.jump_height_trajectory, 4)
-                if self.jump_height_trajectory is not None
-                else None
+            "contact_start_frame": _format_int_metric(self.contact_start_frame),
+            "contact_end_frame": _format_int_metric(self.contact_end_frame),
+            "flight_start_frame": _format_int_metric(self.flight_start_frame),
+            "flight_end_frame": _format_int_metric(self.flight_end_frame),
+            "peak_height_frame": _format_int_metric(self.peak_height_frame),
+            "contact_start_frame_precise": _format_float_metric(
+                self.contact_start_frame_precise, 1, 3
            ),
-            "contact_start_frame": (
-                int(self.contact_start_frame)
-                if self.contact_start_frame is not None
-                else None
+            "contact_end_frame_precise": _format_float_metric(
+                self.contact_end_frame_precise, 1, 3
             ),
-            "contact_end_frame": (
-                int(self.contact_end_frame)
-                if self.contact_end_frame is not None
-                else None
+            "flight_start_frame_precise": _format_float_metric(
+                self.flight_start_frame_precise, 1, 3
             ),
-            "flight_start_frame": (
-                int(self.flight_start_frame)
-                if self.flight_start_frame is not None
-                else None
-            ),
-            "flight_end_frame": (
-                int(self.flight_end_frame)
-                if self.flight_end_frame is not None
-                else None
-            ),
-            "peak_height_frame": (
-                int(self.peak_height_frame)
-                if self.peak_height_frame is not None
-                else None
-            ),
-            "contact_start_frame_precise": (
-                round(self.contact_start_frame_precise, 3)
-                if self.contact_start_frame_precise is not None
-                else None
-            ),
-            "contact_end_frame_precise": (
-                round(self.contact_end_frame_precise, 3)
-                if self.contact_end_frame_precise is not None
-                else None
-            ),
-            "flight_start_frame_precise": (
-                round(self.flight_start_frame_precise, 3)
-                if self.flight_start_frame_precise is not None
-                else None
-            ),
-            "flight_end_frame_precise": (
-                round(self.flight_end_frame_precise, 3)
-                if self.flight_end_frame_precise is not None
-                else None
+            "flight_end_frame_precise": _format_float_metric(
+                self.flight_end_frame_precise, 1, 3
             ),
         }
 
-        # Build metadata from ResultMetadata if available, otherwise use legacy quality
+    def _build_metadata_dict(self) -> dict:
+        """Build the metadata portion of the result dictionary.
+
+        Returns:
+            Metadata dictionary from available sources.
+        """
         if self.result_metadata is not None:
-            metadata = self.result_metadata.to_dict()
-        elif self.quality_assessment is not None:
-            # Fallback for backwards compatibility during transition
-            metadata = {"quality": self.quality_assessment.to_dict()}
-        else:
-            # No metadata available
-            metadata = {}
-
-        return {"data": data, "metadata": metadata}
+            return self.result_metadata.to_dict()
+        if self.quality_assessment is not None:
+            return {"quality": self.quality_assessment.to_dict()}
+        return {}
+
+    def to_dict(self) -> DropJumpResultDict:
+        """Convert metrics to JSON-serializable dictionary with data/metadata structure.
+
+        Returns:
+            Dictionary with nested data and metadata structure.
+        """
+        return {
+            "data": self._build_data_dict(),
+            "metadata": self._build_metadata_dict(),
+        }
 
 
 def _determine_drop_start_frame(
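With this refactor, to_dict simply assembles the output of _build_data_dict and _build_metadata_dict, so the serialized structure is unchanged. A hedged example of that shape with invented values; the actual metadata contents depend on whether result_metadata or quality_assessment is populated:

    # Illustrative shape of DropJumpMetrics.to_dict() output; all values are made up.
    result = {
        "data": {
            "ground_contact_time_ms": 241.37,
            "flight_time_ms": 512.5,
            "jump_height_m": 0.322,
            "contact_start_frame": 118,
            "contact_end_frame": 147,
            "flight_start_frame_precise": 147.417,
        },
        "metadata": {
            # Either ResultMetadata.to_dict() output, or the backwards-compatible
            # {"quality": ...} fallback, or {} when neither source is available.
            "quality": {"overall": "good"},
        },
    }
    print(result["data"]["ground_contact_time_ms"])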
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.25.0
+Version: 0.26.1
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -1,5 +1,5 @@
 kinemotion/__init__.py,sha256=vAEIg-oDX1ZkQMnWgXd__tekaA5KUcEvdJSAGWS8VUY,722
-kinemotion/api.py,sha256=3oLJEjtHweG85t_BG1nCWnZ-8yl3tGW_6ZoBAILMfJw,38006
+kinemotion/api.py,sha256=ELkAk0xq2MaafVwSAahXIf1KP9am8rpxHibqcnId6pg,38213
 kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
 kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
 kinemotion/cmj/analysis.py,sha256=4HYGn4VDIB6oExAees-VcPfpNgWOltpgwjyNTU7YAb4,18263
@@ -18,13 +18,13 @@ kinemotion/core/quality.py,sha256=OC9nuf5IrQ9xURf3eA50VoNWOqkGwbjJpS90q2FDQzA,13
 kinemotion/core/smoothing.py,sha256=x4o3BnG6k8OaV3emgpoJDF84CE9k5RYR7BeSYH_-8Es,14092
 kinemotion/core/video_io.py,sha256=0bJTheYidEqxGP5Y2dSO2x6sbOrnBDBu2TEiV8gT23A,7285
 kinemotion/dropjump/__init__.py,sha256=yc1XiZ9vfo5h_n7PKVSiX2TTgaIfGL7Y7SkQtiDZj_E,838
-kinemotion/dropjump/analysis.py,sha256=1AsIsgWg5wuwJo7poFK7aMCFr93yHVms-fEvaOGQQWs,27448
+kinemotion/dropjump/analysis.py,sha256=BQ5NqSPNJjFQOb-W4bXSLvjCgWd-nvqx5NElyeqZJC4,29067
 kinemotion/dropjump/cli.py,sha256=ZyroaYPwz8TgfL39Wcaj6m68Awl6lYXC75ttaflU-c0,16236
 kinemotion/dropjump/debug_overlay.py,sha256=LkPw6ucb7beoYWS4L-Lvjs1KLCm5wAWDAfiznUeV2IQ,5668
-kinemotion/dropjump/kinematics.py,sha256=PaVakc8eiYR6ZErp2jO3A8Ey-rNIso0rGLft6-yOEzs,17510
+kinemotion/dropjump/kinematics.py,sha256=Ig9TqXr-OEUm19gqIvUjQkqrCuw1csYt1f4ZfwG8oGc,17464
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.25.0.dist-info/METADATA,sha256=lJ39uLFmaTzqvZXJNw8eWBwwvWIeM2VQDd1D2cDcBUw,23244
-kinemotion-0.25.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-kinemotion-0.25.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.25.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.25.0.dist-info/RECORD,,
+kinemotion-0.26.1.dist-info/METADATA,sha256=4Ads7Gis9jvPj3qOXQBwyGB6c3wJr8kkVWpX-kKwI1Q,23244
+kinemotion-0.26.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+kinemotion-0.26.1.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.26.1.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.26.1.dist-info/RECORD,,