kinemotion 0.72.1__py3-none-any.whl → 0.74.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of kinemotion might be problematic.

Files changed (34)
  1. kinemotion/__init__.py +2 -2
  2. kinemotion/api.py +6 -6
  3. kinemotion/cli.py +2 -2
  4. kinemotion/core/__init__.py +11 -0
  5. kinemotion/core/auto_tuning.py +74 -27
  6. kinemotion/core/cli_utils.py +74 -0
  7. kinemotion/core/pipeline_utils.py +2 -2
  8. kinemotion/core/quality.py +4 -6
  9. kinemotion/core/validation.py +70 -0
  10. kinemotion/core/video_analysis_base.py +132 -0
  11. kinemotion/core/video_io.py +27 -18
  12. kinemotion/{cmj → countermovement_jump}/analysis.py +0 -97
  13. kinemotion/{cmj → countermovement_jump}/api.py +37 -11
  14. kinemotion/{cmj → countermovement_jump}/cli.py +6 -46
  15. kinemotion/{cmj → countermovement_jump}/metrics_validator.py +150 -236
  16. kinemotion/{dj → drop_jump}/analysis.py +54 -29
  17. kinemotion/{dj → drop_jump}/api.py +46 -16
  18. kinemotion/{dj → drop_jump}/cli.py +8 -58
  19. kinemotion/{dj → drop_jump}/kinematics.py +98 -50
  20. kinemotion/{dj → drop_jump}/metrics_validator.py +25 -51
  21. {kinemotion-0.72.1.dist-info → kinemotion-0.74.0.dist-info}/METADATA +1 -1
  22. kinemotion-0.74.0.dist-info/RECORD +51 -0
  23. kinemotion-0.72.1.dist-info/RECORD +0 -50
  24. /kinemotion/{cmj → countermovement_jump}/__init__.py +0 -0
  25. /kinemotion/{cmj → countermovement_jump}/debug_overlay.py +0 -0
  26. /kinemotion/{cmj → countermovement_jump}/joint_angles.py +0 -0
  27. /kinemotion/{cmj → countermovement_jump}/kinematics.py +0 -0
  28. /kinemotion/{cmj → countermovement_jump}/validation_bounds.py +0 -0
  29. /kinemotion/{dj → drop_jump}/__init__.py +0 -0
  30. /kinemotion/{dj → drop_jump}/debug_overlay.py +0 -0
  31. /kinemotion/{dj → drop_jump}/validation_bounds.py +0 -0
  32. {kinemotion-0.72.1.dist-info → kinemotion-0.74.0.dist-info}/WHEEL +0 -0
  33. {kinemotion-0.72.1.dist-info → kinemotion-0.74.0.dist-info}/entry_points.txt +0 -0
  34. {kinemotion-0.72.1.dist-info → kinemotion-0.74.0.dist-info}/licenses/LICENSE +0 -0
@@ -677,6 +677,48 @@ def refine_transition_with_curvature(
     return refined_frame
 
 
+def _refine_phase_boundaries(
+    foot_positions: FloatArray,
+    start_frac: float,
+    end_frac: float,
+    start_type: str,
+    end_type: str,
+    smoothing_window: int,
+    polyorder: int,
+) -> tuple[float, float]:
+    """Refine phase boundary frames using curvature analysis.
+
+    Args:
+        foot_positions: Array of foot y-positions (normalized, 0-1)
+        start_frac: Start frame (fractional)
+        end_frac: End frame (fractional)
+        start_type: Transition type for start ("landing" or "takeoff")
+        end_type: Transition type for end ("landing" or "takeoff")
+        smoothing_window: Window size for acceleration computation
+        polyorder: Polynomial order for Savitzky-Golay filter
+
+    Returns:
+        Tuple of (refined_start, refined_end) fractional frame indices
+    """
+    refined_start = refine_transition_with_curvature(
+        foot_positions,
+        start_frac,
+        start_type,
+        search_window=3,
+        smoothing_window=smoothing_window,
+        polyorder=polyorder,
+    )
+    refined_end = refine_transition_with_curvature(
+        foot_positions,
+        end_frac,
+        end_type,
+        search_window=3,
+        smoothing_window=smoothing_window,
+        polyorder=polyorder,
+    )
+    return refined_start, refined_end
+
+
 def find_interpolated_phase_transitions_with_curvature(
     foot_positions: FloatArray,
     contact_states: list[ContactState],
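Note: `refine_transition_with_curvature` itself is unchanged and its body is not shown in this hunk. As a rough, hypothetical sketch of the technique the new helper delegates to (locating the acceleration peak of a Savitzky-Golay-smoothed signal near a candidate frame), something like the following illustrates the idea; the function name, defaults, and exact logic are assumptions, not the package's code:

```python
# Illustrative sketch only, not kinemotion's implementation.
import numpy as np
from scipy.signal import savgol_filter


def sketch_refine_transition(
    positions: np.ndarray,
    candidate_frame: float,
    search_window: int = 3,
    smoothing_window: int = 7,
    polyorder: int = 2,
) -> float:
    """Return the frame near candidate_frame where vertical acceleration peaks."""
    # Second derivative of position ~ acceleration (Savitzky-Golay, deriv=2)
    accel = savgol_filter(positions, smoothing_window, polyorder, deriv=2)
    lo = max(0, int(round(candidate_frame)) - search_window)
    hi = min(len(positions), int(round(candidate_frame)) + search_window + 1)
    return float(lo + np.argmax(np.abs(accel[lo:hi])))
```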
@@ -716,47 +758,30 @@ def find_interpolated_phase_transitions_with_curvature(
     refined_phases: list[tuple[float, float, ContactState]] = []
 
     for start_frac, end_frac, state in interpolated_phases:
-        refined_start = start_frac
-        refined_end = end_frac
-
         if state == ContactState.ON_GROUND:
-            # Refine landing (start of ground contact)
-            refined_start = refine_transition_with_curvature(
+            # ON_GROUND: landing at start, takeoff at end
+            refined_start, refined_end = _refine_phase_boundaries(
                 foot_positions,
                 start_frac,
-                "landing",
-                search_window=3,
-                smoothing_window=smoothing_window,
-                polyorder=polyorder,
-            )
-            # Refine takeoff (end of ground contact)
-            refined_end = refine_transition_with_curvature(
-                foot_positions,
                 end_frac,
+                "landing",
                 "takeoff",
-                search_window=3,
-                smoothing_window=smoothing_window,
-                polyorder=polyorder,
+                smoothing_window,
+                polyorder,
             )
-
         elif state == ContactState.IN_AIR:
-            # For flight phases, takeoff is at start, landing is at end
-            refined_start = refine_transition_with_curvature(
+            # IN_AIR: takeoff at start, landing at end
+            refined_start, refined_end = _refine_phase_boundaries(
                 foot_positions,
                 start_frac,
-                "takeoff",
-                search_window=3,
-                smoothing_window=smoothing_window,
-                polyorder=polyorder,
-            )
-            refined_end = refine_transition_with_curvature(
-                foot_positions,
                 end_frac,
+                "takeoff",
                 "landing",
-                search_window=3,
-                smoothing_window=smoothing_window,
-                polyorder=polyorder,
+                smoothing_window,
+                polyorder,
             )
+        else:
+            refined_start, refined_end = start_frac, end_frac
 
         refined_phases.append((refined_start, refined_end, state))
 
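For illustration, the new helper can be exercised directly on a synthetic foot trajectory (e.g. from a test in the module that defines it); the array values and the `smoothing_window`/`polyorder` settings below are placeholders, and `FloatArray` is assumed to be a NumPy float array alias:

```python
# Hypothetical direct call to _refine_phase_boundaries with synthetic data.
import numpy as np

foot_y = np.concatenate([
    np.linspace(0.2, 0.8, 30),  # drop toward the ground
    np.full(10, 0.8),           # ground contact
    np.linspace(0.8, 0.3, 20),  # flight after takeoff
])

refined_start, refined_end = _refine_phase_boundaries(
    foot_positions=foot_y,
    start_frac=30.0,   # approximate landing frame
    end_frac=40.0,     # approximate takeoff frame
    start_type="landing",
    end_type="takeoff",
    smoothing_window=7,
    polyorder=2,
)
```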
@@ -8,8 +8,11 @@ from pathlib import Path
 from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
+    import numpy as np
     from numpy.typing import NDArray
 
+    from .analysis import ContactState
+
 from ..core.auto_tuning import (
     AnalysisParameters,
     QualityPreset,
@@ -57,6 +60,7 @@ __all__ = [
     "DropJumpVideoConfig",
     "DropJumpVideoResult",
     "process_dropjump_video",
+    "process_dropjump_video_from_config",
     "process_dropjump_videos_bulk",
 ]
 
@@ -98,14 +102,33 @@ class DropJumpVideoConfig:
     overrides: AnalysisOverrides | None = None
     detection_confidence: float | None = None
     tracking_confidence: float | None = None
+    verbose: bool = False
+    timer: Timer | None = None
+    pose_tracker: "MediaPipePoseTracker | None" = None
+
+    def to_kwargs(self) -> dict:
+        """Convert config to kwargs dict for process_dropjump_video."""
+        return {
+            "video_path": self.video_path,
+            "quality": self.quality,
+            "output_video": self.output_video,
+            "json_output": self.json_output,
+            "drop_start_frame": self.drop_start_frame,
+            "overrides": self.overrides,
+            "detection_confidence": self.detection_confidence,
+            "tracking_confidence": self.tracking_confidence,
+            "verbose": self.verbose,
+            "timer": self.timer,
+            "pose_tracker": self.pose_tracker,
+        }
 
 
 def _assess_dropjump_quality(
-    vertical_positions: "NDArray",
-    visibilities: "NDArray",
-    contact_states: list,
+    vertical_positions: "NDArray[np.float64]",
+    visibilities: "NDArray[np.float64]",
+    contact_states: list["ContactState"],
     fps: float,
-) -> tuple:
+) -> tuple[QualityAssessment, "NDArray[np.bool_]", bool, int]:
     """Assess tracking quality and detect phases.
 
     Returns:
@@ -607,6 +630,23 @@ def process_dropjump_video(
     return metrics
 
 
+def process_dropjump_video_from_config(
+    config: DropJumpVideoConfig,
+) -> DropJumpMetrics:
+    """Process a drop jump video using a configuration object.
+
+    This is a convenience wrapper around process_dropjump_video that
+    accepts a DropJumpVideoConfig instead of individual parameters.
+
+    Args:
+        config: Configuration object containing all analysis parameters
+
+    Returns:
+        DropJumpMetrics object containing analysis results
+    """
+    return process_dropjump_video(**config.to_kwargs())
+
+
 def process_dropjump_videos_bulk(
     configs: list[DropJumpVideoConfig],
     max_workers: int = 4,
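A hypothetical single-video call through the new config path might look like the following; the import location and field values are assumptions based on the dataclass fields shown above (only `video_path`, `json_output`, and `verbose` are confirmed in this diff):

```python
# Hypothetical usage of the new config-based entry point.
from kinemotion.drop_jump.api import (
    DropJumpVideoConfig,
    process_dropjump_video_from_config,
)

config = DropJumpVideoConfig(
    video_path="athlete_dropjump.mp4",   # placeholder path
    json_output="athlete_dropjump.json",
    verbose=True,
)
metrics = process_dropjump_video_from_config(config)
```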
@@ -633,18 +673,8 @@ def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVide
     start_time = time.perf_counter()
 
     try:
-        metrics = process_dropjump_video(
-            video_path=config.video_path,
-            quality=config.quality,
-            output_video=config.output_video,
-            json_output=config.json_output,
-            drop_start_frame=config.drop_start_frame,
-            overrides=config.overrides,
-            detection_confidence=config.detection_confidence,
-            tracking_confidence=config.tracking_confidence,
-            verbose=False,
-        )
-
+        # Use convenience wrapper to avoid parameter unpacking
+        metrics = process_dropjump_video_from_config(config)
         processing_time = time.perf_counter() - start_time
 
         return DropJumpVideoResult(
@@ -10,8 +10,12 @@ from typing import TYPE_CHECKING
 import click
 
 from ..core.cli_utils import (
+    batch_processing_options,
     collect_video_files,
+    common_output_options,
     generate_batch_output_paths,
+    quality_option,
+    verbose_option,
 )
 from .api import (
     DropJumpVideoConfig,
@@ -39,64 +43,10 @@ class AnalysisParameters:
 
 @click.command(name="dropjump-analyze")
 @click.argument("video_path", nargs=-1, type=click.Path(exists=False), required=True)
-@click.option(
-    "--output",
-    "-o",
-    type=click.Path(),
-    help="Path for debug video output (optional)",
-)
-@click.option(
-    "--json-output",
-    "-j",
-    type=click.Path(),
-    help="Path for JSON metrics output (default: stdout)",
-)
-@click.option(
-    "--quality",
-    type=click.Choice(["fast", "balanced", "accurate"], case_sensitive=False),
-    default="balanced",
-    help=(
-        "Analysis quality preset: "
-        "fast (quick, less precise), "
-        "balanced (default, good for most cases), "
-        "accurate (research-grade, slower)"
-    ),
-    show_default=True,
-)
-@click.option(
-    "--verbose",
-    "-v",
-    is_flag=True,
-    help="Show auto-selected parameters and analysis details",
-)
-# Batch processing options
-@click.option(
-    "--batch",
-    is_flag=True,
-    help="Enable batch processing mode for multiple videos",
-)
-@click.option(
-    "--workers",
-    type=int,
-    default=4,
-    help="Number of parallel workers for batch processing (default: 4)",
-    show_default=True,
-)
-@click.option(
-    "--output-dir",
-    type=click.Path(),
-    help="Directory for debug video outputs (batch mode only)",
-)
-@click.option(
-    "--json-output-dir",
-    type=click.Path(),
-    help="Directory for JSON metrics outputs (batch mode only)",
-)
-@click.option(
-    "--csv-summary",
-    type=click.Path(),
-    help="Path for CSV summary export (batch mode only)",
-)
+@common_output_options
+@quality_option
+@verbose_option
+@batch_processing_options
 # Expert parameters (hidden in help, but always available for advanced users)
 @click.option(
     "--drop-start-frame",
@@ -226,8 +226,90 @@ def _compute_robust_phase_position(
     return float(np.median(window_positions))
 
 
+def _detect_drop_jump_air_first_pattern(
+    air_phases_indexed: list[tuple[int, int, int]],
+    ground_phases: list[tuple[int, int, int]],
+) -> tuple[int, int] | None:
+    """Detect drop jump using air-first pattern (box + drop classified as IN_AIR).
+
+    Pattern: IN_AIR(box+drop) → ON_GROUND(contact) → IN_AIR(flight) → ON_GROUND(land)
+
+    Args:
+        air_phases_indexed: Air phases with indices
+        ground_phases: Ground phases with indices
+
+    Returns:
+        (contact_start, contact_end) if drop jump detected, None otherwise
+    """
+    if not air_phases_indexed or len(ground_phases) < 2:
+        return None
+
+    _, _, first_air_idx = air_phases_indexed[0]
+    first_ground_start, first_ground_end, first_ground_idx = ground_phases[0]
+
+    # Drop jump: first phase is IN_AIR (index 0), second phase is ground (index 1)
+    if first_air_idx != 0 or first_ground_idx != 1:
+        return None
+
+    # Check for flight phase after contact
+    air_after_contact = [i for _, _, i in air_phases_indexed if i > first_ground_idx]
+    if not air_after_contact:
+        return None
+
+    return first_ground_start, first_ground_end
+
+
+def _detect_drop_jump_height_pattern(
+    air_phases_indexed: list[tuple[int, int, int]],
+    ground_phases: list[tuple[int, int, int]],
+    foot_y_positions: NDArray[np.float64],
+) -> tuple[int, int] | None:
+    """Detect drop jump using height comparison (box detected as ground).
+
+    Legacy detection: first ground is on elevated box (lower y value).
+
+    Args:
+        air_phases_indexed: Air phases with indices
+        ground_phases: Ground phases with indices
+        foot_y_positions: Vertical position array
+
+    Returns:
+        (contact_start, contact_end) if drop jump detected, None otherwise
+    """
+    if not air_phases_indexed or len(ground_phases) < 2:
+        return None
+
+    _, _, first_air_idx = air_phases_indexed[0]
+    first_ground_start, first_ground_end, first_ground_idx = ground_phases[0]
+
+    # This pattern: first ground is before first air (athlete on box)
+    if first_ground_idx >= first_air_idx:
+        return None
+
+    ground_after_air = [
+        (start, end, idx) for start, end, idx in ground_phases if idx > first_air_idx
+    ]
+    if not ground_after_air:
+        return None
+
+    first_ground_y = _compute_robust_phase_position(
+        foot_y_positions, first_ground_start, first_ground_end
+    )
+    second_ground_start, second_ground_end, _ = ground_after_air[0]
+    second_ground_y = _compute_robust_phase_position(
+        foot_y_positions, second_ground_start, second_ground_end
+    )
+
+    # If second ground is significantly lower (>7% of frame), it's a drop jump
+    height_diff = second_ground_y - first_ground_y
+    if height_diff <= 0.07:
+        return None
+
+    return second_ground_start, second_ground_end
+
+
 def _identify_main_contact_phase(
-    phases: list[tuple[int, int, ContactState]],
+    phases: list[tuple[int, int, ContactState]],  # noqa: ARG001  # Used in caller for context
     ground_phases: list[tuple[int, int, int]],
     air_phases_indexed: list[tuple[int, int, int]],
     foot_y_positions: NDArray[np.float64],
@@ -253,55 +335,21 @@ def _identify_main_contact_phase(
     Returns:
         Tuple of (contact_start, contact_end, is_drop_jump)
     """
-    # Initialize with first ground phase as fallback
-    contact_start, contact_end = ground_phases[0][0], ground_phases[0][1]
-    is_drop_jump = False
-
-    # Check if this looks like a drop jump pattern:
-    # Pattern: starts with IN_AIR → ON_GROUND → IN_AIR → ON_GROUND
-    if air_phases_indexed and len(ground_phases) >= 2:
-        _, _, first_air_idx = air_phases_indexed[0]
-        first_ground_start, first_ground_end, first_ground_idx = ground_phases[0]
-
-        # Drop jump pattern: first phase is IN_AIR (athlete on box/dropping)
-        # followed by ground contact, then flight, then landing
-        if first_air_idx == 0 and first_ground_idx == 1:
-            # First phase is air (box + drop), second phase is ground (contact)
-            # Check if there's a flight phase after contact
-            air_after_contact = [
-                (s, e, i) for s, e, i in air_phases_indexed if i > first_ground_idx
-            ]
-            if air_after_contact:
-                # This is a drop jump: first ground = contact, last ground = landing
-                is_drop_jump = True
-                contact_start, contact_end = first_ground_start, first_ground_end
-
-        # Legacy detection: first ground is on elevated box (lower y)
-        # This handles cases where box level IS detected as ground
-        if not is_drop_jump and first_ground_idx < first_air_idx:
-            ground_after_air = [
-                (start, end, idx) for start, end, idx in ground_phases if idx > first_air_idx
-            ]
-            if ground_after_air:
-                first_ground_y = _compute_robust_phase_position(
-                    foot_y_positions, first_ground_start, first_ground_end
-                )
-                second_ground_start, second_ground_end, _ = ground_after_air[0]
-                second_ground_y = _compute_robust_phase_position(
-                    foot_y_positions, second_ground_start, second_ground_end
-                )
-                # If first ground is significantly higher (>7% of frame), it's a drop jump
-                if second_ground_y - first_ground_y > 0.07:
-                    is_drop_jump = True
-                    contact_start, contact_end = second_ground_start, second_ground_end
-
-    if not is_drop_jump:
-        # Regular jump: use longest ground contact phase
-        contact_start, contact_end = max(
-            [(s, e) for s, e, _ in ground_phases], key=lambda p: p[1] - p[0]
-        )
-
-    return contact_start, contact_end, is_drop_jump
+    # Try air-first detection pattern (most common for clean videos)
+    result = _detect_drop_jump_air_first_pattern(air_phases_indexed, ground_phases)
+    if result is not None:
+        return result[0], result[1], True
+
+    # Try height-based detection (fallback for box-as-ground videos)
+    result = _detect_drop_jump_height_pattern(air_phases_indexed, ground_phases, foot_y_positions)
+    if result is not None:
+        return result[0], result[1], True
+
+    # Regular jump: use longest ground contact phase
+    contact_start, contact_end = max(
+        [(s, e) for s, e, _ in ground_phases], key=lambda p: p[1] - p[0]
+    )
+    return contact_start, contact_end, False
 
 
 def _find_precise_phase_timing(
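The two extracted detectors can be checked in isolation on synthetic `(start_frame, end_frame, phase_index)` tuples, for example in a unit test that imports them from `kinemotion.drop_jump.analysis`; the values below are illustrative only:

```python
# Hypothetical test-style exercise of the extracted detection helpers.
import numpy as np

air_phases_indexed = [(0, 45, 0), (75, 95, 2)]   # box + drop, then flight
ground_phases = [(46, 74, 1), (96, 140, 3)]      # contact, then final landing

# Air-first pattern: first phase is IN_AIR, second is the ground contact.
assert _detect_drop_jump_air_first_pattern(air_phases_indexed, ground_phases) == (46, 74)

# Height pattern does not apply here (first ground phase is not before the first air phase).
foot_y = np.linspace(0.2, 0.9, 141)
assert _detect_drop_jump_height_pattern(air_phases_indexed, ground_phases, foot_y) is None
```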
@@ -14,7 +14,7 @@ from kinemotion.core.validation import (
     MetricsValidator,
     ValidationResult,
 )
-from kinemotion.dj.validation_bounds import (
+from kinemotion.drop_jump.validation_bounds import (
     DropJumpBounds,
     estimate_athlete_profile,
 )
@@ -114,63 +114,37 @@ class DropJumpMetricsValidator(MetricsValidator):
     ) -> None:
         """Validate contact time."""
         contact_time_s = contact_time_ms / 1000.0
-        bounds = DropJumpBounds.CONTACT_TIME
-
-        if not bounds.is_physically_possible(contact_time_s):
-            result.add_error(
-                "contact_time",
-                f"Contact time {contact_time_s:.3f}s physically impossible",
-                value=contact_time_s,
-                bounds=(bounds.absolute_min, bounds.absolute_max),
-            )
-        elif result.athlete_profile and not bounds.contains(
-            contact_time_s, result.athlete_profile
-        ):
-            profile_name = result.athlete_profile.value
-            result.add_warning(
-                "contact_time",
-                f"Contact time {contact_time_s:.3f}s unusual for {profile_name} athlete",
-                value=contact_time_s,
-            )
+        self._validate_metric_with_bounds(
+            name="contact_time",
+            value=contact_time_s,
+            bounds=DropJumpBounds.CONTACT_TIME,
+            profile=result.athlete_profile,
+            result=result,
+            format_str="{value:.3f}s",
+        )
 
     def _check_flight_time(self, flight_time_ms: float, result: DropJumpValidationResult) -> None:
         """Validate flight time."""
         flight_time_s = flight_time_ms / 1000.0
-        bounds = DropJumpBounds.FLIGHT_TIME
-
-        if not bounds.is_physically_possible(flight_time_s):
-            result.add_error(
-                "flight_time",
-                f"Flight time {flight_time_s:.3f}s physically impossible",
-                value=flight_time_s,
-                bounds=(bounds.absolute_min, bounds.absolute_max),
-            )
-        elif result.athlete_profile and not bounds.contains(flight_time_s, result.athlete_profile):
-            profile_name = result.athlete_profile.value
-            result.add_warning(
-                "flight_time",
-                f"Flight time {flight_time_s:.3f}s unusual for {profile_name} athlete",
-                value=flight_time_s,
-            )
+        self._validate_metric_with_bounds(
+            name="flight_time",
+            value=flight_time_s,
+            bounds=DropJumpBounds.FLIGHT_TIME,
+            profile=result.athlete_profile,
+            result=result,
+            format_str="{value:.3f}s",
+        )
 
     def _check_jump_height(self, jump_height_m: float, result: DropJumpValidationResult) -> None:
         """Validate jump height."""
-        bounds = DropJumpBounds.JUMP_HEIGHT
-
-        if not bounds.is_physically_possible(jump_height_m):
-            result.add_error(
-                "jump_height",
-                f"Jump height {jump_height_m:.3f}m physically impossible",
-                value=jump_height_m,
-                bounds=(bounds.absolute_min, bounds.absolute_max),
-            )
-        elif result.athlete_profile and not bounds.contains(jump_height_m, result.athlete_profile):
-            profile_name = result.athlete_profile.value
-            result.add_warning(
-                "jump_height",
-                f"Jump height {jump_height_m:.3f}m unusual for {profile_name} athlete",
-                value=jump_height_m,
-            )
+        self._validate_metric_with_bounds(
+            name="jump_height",
+            value=jump_height_m,
+            bounds=DropJumpBounds.JUMP_HEIGHT,
+            profile=result.athlete_profile,
+            result=result,
+            format_str="{value:.3f}m",
+        )
 
     def _check_rsi(
         self,
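`_validate_metric_with_bounds` lives in `kinemotion/core/validation.py` (+70 lines, not expanded in this diff). Inferring from the per-metric checks removed above, the shared helper plausibly looks something like the sketch below; this is a reconstruction, not the actual code:

```python
# Sketch only: a possible shape for the shared bounds-validation helper on
# MetricsValidator, inferred from the removed per-metric checks.
def _validate_metric_with_bounds(self, *, name, value, bounds, profile, result, format_str):
    """Emit an error if value is physically impossible, a warning if unusual for profile."""
    label = name.replace("_", " ").capitalize()
    formatted = format_str.format(value=value)
    if not bounds.is_physically_possible(value):
        result.add_error(
            name,
            f"{label} {formatted} physically impossible",
            value=value,
            bounds=(bounds.absolute_min, bounds.absolute_max),
        )
    elif profile and not bounds.contains(value, profile):
        result.add_warning(
            name,
            f"{label} {formatted} unusual for {profile.value} athlete",
            value=value,
        )
```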
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.72.1
+Version: 0.74.0
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -0,0 +1,51 @@
+kinemotion/__init__.py,sha256=GovgX2mxUmfTH2HsMqlOCfU28niiTdu0c7iTIACcly4,1079
+kinemotion/api.py,sha256=x7rHYgh-bpc6a1kMp9fnnFkU7y96qYelDbzcqN3oUKY,1110
+kinemotion/cli.py,sha256=H3--5whfauDuOyW31lrK1MF-2U-61V11ft5RIndZuUU,644
+kinemotion/core/__init__.py,sha256=E7HDsetuKTJ7EBb8ftNifJoKS8uhS4snRO7HgISyBoM,2035
+kinemotion/core/auto_tuning.py,sha256=ZDmHJJw69wzlRkEJ8OtLPE6KV5wLHcMha1RePVRICkI,11836
+kinemotion/core/cli_utils.py,sha256=6VA8HSnVvKaWMm7d9ahrqwFPXuRPp53s0dib1yCE7yQ,4179
+kinemotion/core/debug_overlay_utils.py,sha256=QaVkHuFZpXUrdiMlm8ylQn6baJOj8jcZeiV4kDqODt0,17441
+kinemotion/core/determinism.py,sha256=Frw-KAOvAxTL_XtxoWpXCjMbQPUKEAusK6JctlkeuRo,2509
+kinemotion/core/experimental.py,sha256=G1EpkmWQ8d-rPaN1n0P7mF6XUzrbW0Br3nVkIzJ1D9M,3694
+kinemotion/core/filtering.py,sha256=7KUeclXqZpNQA8WKNocDwhCxZpwwtizI3wvAEyq9SBo,11603
+kinemotion/core/formatting.py,sha256=G_3eqgOtym9RFOZVEwCxye4A2cyrmgvtQ214vIshowU,2480
+kinemotion/core/metadata.py,sha256=bJAVa4nym__zx1hNowSZduMGKBSGOPxTbBQkjm6N0D0,7207
+kinemotion/core/model_downloader.py,sha256=mqhJBHGaNe0aN9qbcBqvcTk9FDd7xaHqEcwD-fyP89c,5205
+kinemotion/core/overlay_constants.py,sha256=zZreHHWe00p2XuCJsbRFqN6g-AAUAnx53LwKqHm1Bl8,1438
+kinemotion/core/pipeline_utils.py,sha256=i0x9HM2wzQDvvD9Y54VPIU20Nu-k-IYy6AY9fHPAetY,15179
+kinemotion/core/pose.py,sha256=Z795p0EnaTUeWHO8FuApFcMGTLwZ47JOjs5f5TzRvdk,14224
+kinemotion/core/pose_landmarks.py,sha256=LcEbL5K5xKia6dCzWf6Ft18UIE1CLMMqCZ3KUjwUDzM,1558
+kinemotion/core/quality.py,sha256=JxuM6jwO9i07S6Don3Fki9eFDyITQ6OYJr8YhPSTL7s,13010
+kinemotion/core/smoothing.py,sha256=F1DCsnvPBi62XJLygOJ5MkNlRa7BCLg_E9ORtCWcoKk,16562
+kinemotion/core/timing.py,sha256=ITX77q4hbtajRuWfgwYhws8nCvOeKFlEdKjCu8lD9_w,7938
+kinemotion/core/types.py,sha256=m141buSkEsqflt5VFaTHtRq_IcimjI3_T_EfaNpIVxY,1652
+kinemotion/core/validation.py,sha256=-8Wwe56PO37F0OAEMpWr1AB_7QmFtDY5bVmux3oiLYM,9585
+kinemotion/core/video_analysis_base.py,sha256=U8j-6-dv6uiGUiIHl53AIVFUiVHotgTmMNvCArSXx0E,4045
+kinemotion/core/video_io.py,sha256=tLAHm63_sap-CXQpLzmgUXpWZ5_TtBI9LHP8Tk2L-z4,9355
+kinemotion/countermovement_jump/__init__.py,sha256=SkAw9ka8Yd1Qfv9hcvk22m3EfucROzYrSNGNF5kDzho,113
+kinemotion/countermovement_jump/analysis.py,sha256=0ocj1ZbaV4rPeor-y6jvDIKsGalZoozwo7kxwFhQQ1w,20637
+kinemotion/countermovement_jump/api.py,sha256=uNo2JLuFDeBdpi3Y2qf-DyG-1KIRwmSF7HjXMu9Cwj0,19320
+kinemotion/countermovement_jump/cli.py,sha256=m727IOg5BuixgNraCXc2sjW5jGrxrg7RKvFS4qyrBK8,8902
+kinemotion/countermovement_jump/debug_overlay.py,sha256=vF5Apiz8zDRpgrVzf52manLW99m1kHQAPSdUkar5rPs,11474
+kinemotion/countermovement_jump/joint_angles.py,sha256=by5M4LDtUfd2_Z9DmcgUl0nsvarsBYjgsE8KWWYcn08,11255
+kinemotion/countermovement_jump/kinematics.py,sha256=KwA8uSj3g1SeNf0NXMSHsp3gIw6Gfa-6QWIwdYdRXYw,13362
+kinemotion/countermovement_jump/metrics_validator.py,sha256=Gozn88jBpe77GhLIMYZfcAlfAmu4_k9R73bCfcwUsTI,24691
+kinemotion/countermovement_jump/validation_bounds.py,sha256=-0iXDhH-RntiGZi_Co22V6qtA5D-hLzkrPkVcfoNd2U,11343
+kinemotion/drop_jump/__init__.py,sha256=yBbEbPdY6sqozWtTvfbvuUZnrVWSSjBp61xK34M29F4,878
+kinemotion/drop_jump/analysis.py,sha256=ppbhB6Z9GTPzFlIOXso7tq0ldCpf1EZbYk2v31ClLV8,33998
+kinemotion/drop_jump/api.py,sha256=xcA7CkBjLQZhIs6UHr2mo5jT1p-D6e3cgRAFLSwZCmE,21927
+kinemotion/drop_jump/cli.py,sha256=WTUJWCjBl9SgR3Z-2cml1EQhVF8HaXIXQ27fS4tnR7U,14693
+kinemotion/drop_jump/debug_overlay.py,sha256=X4mvCi5Qi1gnvSZZAsUs-0ZRUx9mVBbEUznOFO21HO8,8470
+kinemotion/drop_jump/kinematics.py,sha256=59Q035bXAGGEAdrfLA2mALkWfJUifs35Tvk89xfU8pc,23657
+kinemotion/drop_jump/metrics_validator.py,sha256=yY0wzFzUUDLn6pZcOnwErMlIt_aTWq-RyAkqIemBG5M,7885
+kinemotion/drop_jump/validation_bounds.py,sha256=k31qy-kCXTiCTx0RPo2t8yZ-faLxqGO-AeF05QfBFb0,5125
+kinemotion/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+kinemotion/models/pose_landmarker_lite.task,sha256=WZKeHR7pUodzXd2DOxnPSsRtKbx6_du_Z1PEWWkNV0o,5777746
+kinemotion/models/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.onnx,sha256=dfZTq8kbhv8RxWiXS0HUIJNCUpxYTBN45dFIorPflEs,133
+kinemotion/models/yolox_tiny_8xb8-300e_humanart-6f3252f9.onnx,sha256=UsutHVQ6GP3X5pCcp52EN8q7o2J3d-TnxZqlF48kY6I,133
+kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+kinemotion-0.74.0.dist-info/METADATA,sha256=lg3dJnKHLKkm-_hs373v_JfjXrJniQp5ngwe9jKMVaU,26125
+kinemotion-0.74.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+kinemotion-0.74.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.74.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.74.0.dist-info/RECORD,,
@@ -1,50 +0,0 @@
-kinemotion/__init__.py,sha256=jlhHJlxZJbp7CPfRWMlxwfDISBCdHadjigmXvcahXxU,1055
-kinemotion/api.py,sha256=1GMi_7SbU3EyDdDRtnF55roB8iMR0t91qGOkqa4E6sI,1028
-kinemotion/cli.py,sha256=ugrc1Dpx7abYGEyIDcslzNu8KR0VoOoGGUGUm87E_kQ,620
-kinemotion/cmj/__init__.py,sha256=SkAw9ka8Yd1Qfv9hcvk22m3EfucROzYrSNGNF5kDzho,113
-kinemotion/cmj/analysis.py,sha256=EQydClIbNkIj-FmCZGaPQe-COVW8fbO3139i9z1vomA,23643
-kinemotion/cmj/api.py,sha256=P_lbqEqAKPO5n1Xn4IQZKNj9nLaO3ljkN2PgqvExGXU,18435
-kinemotion/cmj/cli.py,sha256=P2b77IIw6kqTSIkncxlShzhmjIwqMFBNd-pZxYP-TsI,9918
-kinemotion/cmj/debug_overlay.py,sha256=vF5Apiz8zDRpgrVzf52manLW99m1kHQAPSdUkar5rPs,11474
-kinemotion/cmj/joint_angles.py,sha256=by5M4LDtUfd2_Z9DmcgUl0nsvarsBYjgsE8KWWYcn08,11255
-kinemotion/cmj/kinematics.py,sha256=KwA8uSj3g1SeNf0NXMSHsp3gIw6Gfa-6QWIwdYdRXYw,13362
-kinemotion/cmj/metrics_validator.py,sha256=IQofafpwLCXER3ucZXNfiJKFFKPOVxXnC4BNLHOMnNY,30013
-kinemotion/cmj/validation_bounds.py,sha256=-0iXDhH-RntiGZi_Co22V6qtA5D-hLzkrPkVcfoNd2U,11343
-kinemotion/core/__init__.py,sha256=8hMvfNK7v_eqswuk_J5s5FRGvPtp2-R4kasVMGchFkM,1766
-kinemotion/core/auto_tuning.py,sha256=rliPTLueMbOjYRb4hjb0af7DVMtxLT92wpnVve75GvA,10478
-kinemotion/core/cli_utils.py,sha256=sQPbT6XWWau-sm9yuN5c3eS5xNzoQGGXwSz6hQXtRvM,1859
-kinemotion/core/debug_overlay_utils.py,sha256=QaVkHuFZpXUrdiMlm8ylQn6baJOj8jcZeiV4kDqODt0,17441
-kinemotion/core/determinism.py,sha256=Frw-KAOvAxTL_XtxoWpXCjMbQPUKEAusK6JctlkeuRo,2509
-kinemotion/core/experimental.py,sha256=G1EpkmWQ8d-rPaN1n0P7mF6XUzrbW0Br3nVkIzJ1D9M,3694
-kinemotion/core/filtering.py,sha256=7KUeclXqZpNQA8WKNocDwhCxZpwwtizI3wvAEyq9SBo,11603
-kinemotion/core/formatting.py,sha256=G_3eqgOtym9RFOZVEwCxye4A2cyrmgvtQ214vIshowU,2480
-kinemotion/core/metadata.py,sha256=bJAVa4nym__zx1hNowSZduMGKBSGOPxTbBQkjm6N0D0,7207
-kinemotion/core/model_downloader.py,sha256=mqhJBHGaNe0aN9qbcBqvcTk9FDd7xaHqEcwD-fyP89c,5205
-kinemotion/core/overlay_constants.py,sha256=zZreHHWe00p2XuCJsbRFqN6g-AAUAnx53LwKqHm1Bl8,1438
-kinemotion/core/pipeline_utils.py,sha256=FzfdKNhM0eK9Y5wbNP9Jab_nmrZxcJfL3cstpO4yfxc,15155
-kinemotion/core/pose.py,sha256=Z795p0EnaTUeWHO8FuApFcMGTLwZ47JOjs5f5TzRvdk,14224
-kinemotion/core/pose_landmarks.py,sha256=LcEbL5K5xKia6dCzWf6Ft18UIE1CLMMqCZ3KUjwUDzM,1558
-kinemotion/core/quality.py,sha256=VUkRL2N6B7lfIZ2pE9han_U68JwarmZz1U0ygHkgkhE,13022
-kinemotion/core/smoothing.py,sha256=F1DCsnvPBi62XJLygOJ5MkNlRa7BCLg_E9ORtCWcoKk,16562
-kinemotion/core/timing.py,sha256=ITX77q4hbtajRuWfgwYhws8nCvOeKFlEdKjCu8lD9_w,7938
-kinemotion/core/types.py,sha256=m141buSkEsqflt5VFaTHtRq_IcimjI3_T_EfaNpIVxY,1652
-kinemotion/core/validation.py,sha256=rrhpI24Iq8WGtNaMg0beTWMbEGccdKF-f-pk-FCKJzI,6749
-kinemotion/core/video_io.py,sha256=84IxC1n3HvYK28MSa5fqumdzlPDhP8k9IPB3OCvWku0,9198
-kinemotion/dj/__init__.py,sha256=yBbEbPdY6sqozWtTvfbvuUZnrVWSSjBp61xK34M29F4,878
-kinemotion/dj/analysis.py,sha256=dR5Dqxo_ub9EAOR95oPI4oJKtIofSH0EodopuoywsO8,33339
-kinemotion/dj/api.py,sha256=v-T-VurOoOIAWVyfR5IUCnUc4bHjBuxB2pP8qJG7TLs,20799
-kinemotion/dj/cli.py,sha256=FaBX637x7VcLcB8HupaZCkVS7sp8C0YuaKM0h-DBNIA,15906
-kinemotion/dj/debug_overlay.py,sha256=X4mvCi5Qi1gnvSZZAsUs-0ZRUx9mVBbEUznOFO21HO8,8470
-kinemotion/dj/kinematics.py,sha256=1K291z-PeJTqJbJDeIKWat90mVbxxh4B4hjz-nTFk88,22618
-kinemotion/dj/metrics_validator.py,sha256=BZbqareRaIfCcehTUvNPO3xzkq4X27xDD867e_w7Fmo,9237
-kinemotion/dj/validation_bounds.py,sha256=k31qy-kCXTiCTx0RPo2t8yZ-faLxqGO-AeF05QfBFb0,5125
-kinemotion/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion/models/pose_landmarker_lite.task,sha256=WZKeHR7pUodzXd2DOxnPSsRtKbx6_du_Z1PEWWkNV0o,5777746
-kinemotion/models/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.onnx,sha256=dfZTq8kbhv8RxWiXS0HUIJNCUpxYTBN45dFIorPflEs,133
-kinemotion/models/yolox_tiny_8xb8-300e_humanart-6f3252f9.onnx,sha256=UsutHVQ6GP3X5pCcp52EN8q7o2J3d-TnxZqlF48kY6I,133
-kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.72.1.dist-info/METADATA,sha256=yZfreLGtqNencpusq6HrXv4mrVCTZ4tQwAAYWCdWu6U,26125
-kinemotion-0.72.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-kinemotion-0.72.1.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.72.1.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.72.1.dist-info/RECORD,,