kinemotion-0.71.1-py3-none-any.whl → kinemotion-0.72.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kinemotion might be problematic.
kinemotion/__init__.py CHANGED
@@ -14,7 +14,7 @@ from .api import (
     process_dropjump_videos_bulk,
 )
 from .cmj.kinematics import CMJMetrics
-from .dropjump.kinematics import DropJumpMetrics
+from .dj.kinematics import DropJumpMetrics
 
 # Get version from package metadata (set in pyproject.toml)
 try:
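
The dropjump package is renamed to dj throughout this release. The top-level re-export keeps working, so only direct submodule imports need updating. A minimal before/after sketch, using only names visible in this diff:

# Unaffected: DropJumpMetrics is still re-exported from the package root
from kinemotion import DropJumpMetrics

# Direct submodule imports must switch to the new path in 0.72.0
# (was: from kinemotion.dropjump.kinematics import DropJumpMetrics)
from kinemotion.dj.kinematics import DropJumpMetrics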
kinemotion/api.py CHANGED
@@ -2,7 +2,7 @@
 
 This module provides a unified interface for both drop jump and CMJ video analysis.
 The actual implementations have been moved to their respective submodules:
-- Drop jump: kinemotion.dropjump.api
+- Drop jump: kinemotion.dj.api
 - CMJ: kinemotion.cmj.api
 
 """
@@ -20,7 +20,7 @@ from .cmj.api import (
 from .cmj.kinematics import CMJMetrics
 
 # Drop jump API
-from .dropjump.api import (
+from .dj.api import (
     AnalysisOverrides,
     DropJumpVideoConfig,
     DropJumpVideoResult,
kinemotion/cli.py CHANGED
@@ -3,7 +3,7 @@
 import click
 
 from .cmj.cli import cmj_analyze
-from .dropjump.cli import dropjump_analyze
+from .dj.cli import dropjump_analyze
 
 
 @click.group()
@@ -170,7 +170,10 @@ def auto_tune_parameters(
     quality_adj = _QUALITY_ADJUSTMENTS[quality]
 
     # Compute FPS-based baseline parameters
-    base_velocity_threshold = 0.004 * (30.0 / fps)
+    # Base velocity threshold: 0.012 at 30fps, scaled inversely by fps
+    # Must exceed typical MediaPipe landmark jitter (0.5-2% per frame)
+    # Previous value of 0.004 was below noise floor, causing false IN_AIR detections
+    base_velocity_threshold = 0.012 * (30.0 / fps)
     base_min_contact_frames = max(2, round(3.0 * (fps / 30.0)))
 
     # Smoothing window: Decrease with higher fps for better temporal resolution
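
For context on the retuned baseline: the threshold is expressed in normalized units per frame, so doubling the frame rate halves per-frame displacement and the threshold scales down accordingly, while the minimum contact duration scales up in frames. A small illustrative sketch of the two formulas above (the function name is hypothetical):

def fps_scaled_baselines(fps: float) -> tuple[float, int]:
    # 0.012 normalized units/frame at 30 fps; higher fps -> smaller per-frame motion
    base_velocity_threshold = 0.012 * (30.0 / fps)
    # ~3 frames of contact at 30 fps, scaled with frame rate, never below 2
    base_min_contact_frames = max(2, round(3.0 * (fps / 30.0)))
    return base_velocity_threshold, base_min_contact_frames

print(fps_scaled_baselines(30.0))   # (0.012, 3)
print(fps_scaled_baselines(60.0))   # (0.006, 6)
print(fps_scaled_baselines(240.0))  # (0.0015, 24)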
@@ -9,7 +9,7 @@ import cv2
 import numpy as np
 
 from ..cmj.analysis import compute_average_hip_position
-from ..dropjump.analysis import compute_average_foot_position
+from ..dj.analysis import compute_average_foot_position
 from .auto_tuning import AnalysisParameters, QualityPreset, VideoCharacteristics
 from .pose import MediaPipePoseTracker
 from .smoothing import smooth_landmarks, smooth_landmarks_advanced
@@ -334,6 +334,48 @@ def _assign_contact_states(
     return states
 
 
+def _compute_near_ground_mask(
+    foot_positions: FloatArray,
+    height_tolerance: float = 0.35,
+) -> BoolArray:
+    """Compute mask for frames where feet are near ground level.
+
+    Uses position-based filtering to identify frames near ground baseline.
+    In normalized coordinates: y=1 is bottom (ground), y=0 is top.
+
+    The ground baseline is established as the 90th percentile of positions,
+    which represents the typical ground level while handling outliers.
+
+    The tolerance is set at 35% of the position range by default, which is
+    generous enough to capture the full reactive contact phase (where athletes
+    maintain an athletic stance) while still filtering out the jump apex
+    (where y is much lower than ground level).
+
+    Args:
+        foot_positions: Array of foot y-positions (normalized, 0-1)
+        height_tolerance: Fraction of position range allowed above ground (default 35%)
+
+    Returns:
+        Boolean array where True indicates frame is near ground level
+    """
+    # Ground baseline: 90th percentile (where feet are typically on ground)
+    # Using 90th instead of 95th to be less sensitive to final landing positions
+    ground_baseline = float(np.percentile(foot_positions, 90))
+
+    # Compute position range for tolerance calculation
+    position_range = float(np.max(foot_positions) - np.min(foot_positions))
+
+    # Minimum absolute tolerance to handle small movements
+    min_tolerance = 0.03  # 3% of normalized range
+
+    # Height tolerance: percentage of position range or minimum
+    tolerance = max(position_range * height_tolerance, min_tolerance)
+
+    # Frames are near ground if y >= ground_baseline - tolerance
+    # (Remember: higher y = closer to ground in normalized coords)
+    return foot_positions >= (ground_baseline - tolerance)
+
+
 def detect_ground_contact(
     foot_positions: FloatArray,
     velocity_threshold: float = 0.02,
@@ -343,13 +385,14 @@ def detect_ground_contact(
     window_length: int = 5,
     polyorder: int = 2,
     timer: Timer | None = None,
+    height_tolerance: float = 0.35,
 ) -> list[ContactState]:
     """
-    Detect when feet are in contact with ground based on vertical motion.
+    Detect when feet are in contact with ground based on vertical motion AND position.
 
-    Uses derivative-based velocity calculation via Savitzky-Goyal filter for smooth,
-    accurate velocity estimates. This is consistent with the velocity calculation used
-    throughout the pipeline for sub-frame interpolation and curvature analysis.
+    Uses derivative-based velocity calculation via Savitzky-Golay filter for smooth,
+    accurate velocity estimates. Additionally uses position-based filtering to prevent
+    false ON_GROUND classification at jump apex where velocity approaches zero.
 
     Args:
         foot_positions: Array of foot y-positions (normalized, 0-1, where 1 is bottom)
@@ -360,6 +403,7 @@
         window_length: Window size for velocity derivative calculation (must be odd)
         polyorder: Polynomial order for Savitzky-Golay filter (default: 2)
         timer: Optional Timer for measuring operations
+        height_tolerance: Fraction of position range to allow above ground baseline (default 35%)
 
     Returns:
         List of ContactState for each frame
@@ -379,6 +423,14 @@
     # Detect stationary frames based on velocity threshold
     is_stationary = np.abs(velocities) < velocity_threshold
 
+    # Position-based filtering to prevent false ON_GROUND at jump apex
+    # In normalized coords: y=1 is bottom (ground), y=0 is top
+    # Ground baseline is the 90th percentile (handles outliers)
+    is_near_ground = _compute_near_ground_mask(foot_positions, height_tolerance)
+
+    # Both conditions must be true: low velocity AND near ground
+    is_stationary = is_stationary & is_near_ground
+
     # Apply visibility filter
     is_stationary = _filter_stationary_with_visibility(
         is_stationary, visibilities, visibility_threshold
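
The net effect of this hunk: a frame counts as stationary only if the velocity gate AND the position gate both pass, which is what suppresses false ON_GROUND states at the apex. A self-contained sketch on synthetic data (NumPy only; np.gradient stands in for the pipeline's Savitzky-Golay derivative, and the trajectory values are made up for illustration):

import numpy as np

# Synthetic foot trajectory: ground (~0.9), rise to apex (~0.5), return to ground
foot_y = np.array([0.90, 0.90, 0.85, 0.70, 0.55, 0.50, 0.50, 0.55, 0.70, 0.85, 0.90, 0.90])
velocities = np.gradient(foot_y)

is_slow = np.abs(velocities) < 0.03

# Position gate, mirroring _compute_near_ground_mask (90th percentile baseline)
ground_baseline = float(np.percentile(foot_y, 90))
tolerance = max((foot_y.max() - foot_y.min()) * 0.35, 0.03)
is_near_ground = foot_y >= ground_baseline - tolerance

# Apex frames (y ~= 0.5) are slow but fail the position gate, so they stay IN_AIR
is_stationary = is_slow & is_near_ground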
@@ -716,25 +768,28 @@ def find_landing_from_acceleration(
     accelerations: FloatArray,
     takeoff_frame: int,
     fps: float,
-    search_duration: float = 0.7,
+    search_duration: float = 1.5,
 ) -> int:
     """
-    Find landing frame by detecting impact acceleration after takeoff.
+    Find landing frame using position-based detection with acceleration refinement.
+
+    Primary method: Find when feet return to near-takeoff level after peak.
+    Secondary: Refine with acceleration spike if present.
 
-    Detects the moment of initial ground contact, characterized by a sharp
-    deceleration (positive acceleration spike) as downward velocity is arrested.
+    For drop jumps, landing is defined as the first ground contact after the
+    reactive jump, when feet return to approximately the same level as takeoff.
 
     Args:
-        positions: Array of vertical positions (normalized 0-1)
+        positions: Array of vertical positions (normalized 0-1, where higher = closer to ground)
         accelerations: Array of accelerations (second derivative)
         takeoff_frame: Frame at takeoff (end of ground contact)
        fps: Video frame rate
-        search_duration: Duration in seconds to search for landing (default: 0.7s)
+        search_duration: Duration in seconds to search for landing (default: 1.5s)
 
     Returns:
        Landing frame index (integer)
     """
-    # Find peak height (minimum y value = highest point)
+    # Extended search window to capture full flight
     search_start = takeoff_frame
     search_end = min(len(positions), takeoff_frame + int(fps * search_duration))
 
@@ -742,52 +797,91 @@
         return min(len(positions) - 1, takeoff_frame + int(fps * 0.3))
 
     flight_positions = positions[search_start:search_end]
+
+    # Find peak height (minimum y value = highest point)
     peak_idx = int(np.argmin(flight_positions))
     peak_frame = search_start + peak_idx
 
-    # After peak, look for landing (impact with ground)
-    # Landing is detected by maximum positive acceleration (deceleration on impact)
-    landing_search_start = peak_frame + 2
-    landing_search_end = min(len(accelerations), landing_search_start + int(fps * 0.6))
+    # Get takeoff position as reference for landing detection
+    takeoff_position = positions[takeoff_frame]
+
+    # Position-based landing: find first frame after peak where position
+    # returns to within 5% of takeoff level (or 95% of the way back)
+    landing_threshold = takeoff_position - 0.05 * (takeoff_position - positions[peak_frame])
 
-    if landing_search_end <= landing_search_start:
-        return min(len(positions) - 1, peak_frame + int(fps * 0.2))
+    # Search for landing after peak
+    landing_frame = None
+    for i in range(peak_frame + 2, min(len(positions), search_end)):
+        if positions[i] >= landing_threshold:
+            landing_frame = i
+            break
 
-    # Find impact: maximum negative acceleration after peak (deceleration on impact)
-    # The impact creates a large upward force (negative acceleration in Y-down)
-    landing_accelerations = accelerations[landing_search_start:landing_search_end]
-    impact_idx = int(np.argmin(landing_accelerations))
-    landing_frame = landing_search_start + impact_idx
+    # If position-based detection fails, use end of search window
+    if landing_frame is None:
+        landing_frame = min(len(positions) - 1, search_end - 1)
+
+    # Refine with acceleration if there's a clear impact spike
+    # Look for significant acceleration in a small window around the position-based landing
+    refine_start = max(peak_frame + 2, landing_frame - int(fps * 0.1))
+    refine_end = min(len(accelerations), landing_frame + int(fps * 0.1))
+
+    if refine_end > refine_start:
+        window_accelerations = accelerations[refine_start:refine_end]
+        # Check if there's a significant acceleration spike (> 3x median)
+        median_acc = float(np.median(np.abs(window_accelerations)))
+        max_acc_idx = int(np.argmax(np.abs(window_accelerations)))
+        max_acc = float(np.abs(window_accelerations[max_acc_idx]))
+
+        if median_acc > 0 and max_acc > 3 * median_acc:
+            # Use acceleration-refined landing frame
+            landing_frame = refine_start + max_acc_idx
 
     return landing_frame
 
 
 def compute_average_foot_position(
     landmarks: dict[str, tuple[float, float, float]],
+    visibility_threshold: float = 0.5,
 ) -> tuple[float, float]:
     """
     Compute average foot position from ankle and foot landmarks.
 
+    Uses tiered visibility approach to avoid returning center (0.5, 0.5)
+    which can cause false phase transitions in contact detection.
+
     Args:
         landmarks: Dictionary of landmark positions
+        visibility_threshold: Minimum visibility to include landmark (default: 0.5)
 
     Returns:
         (x, y) average foot position in normalized coordinates
     """
-    x_positions = []
-    y_positions = []
-
+    # Collect all foot landmarks with their visibility
+    foot_data: list[tuple[float, float, float]] = []
     for key in FOOT_KEYS:
         if key in landmarks:
             x, y, visibility = landmarks[key]
-            if visibility > 0.5:  # Only use visible landmarks
-                x_positions.append(x)
-                y_positions.append(y)
-
-    if not x_positions:
-        return (0.5, 0.5)  # Default to center if no visible feet
-
-    return (float(np.mean(x_positions)), float(np.mean(y_positions)))
+            foot_data.append((x, y, visibility))
+
+    if not foot_data:
+        # No foot landmarks at all - return center as last resort
+        return (0.5, 0.5)
+
+    # Tier 1: Use landmarks above visibility threshold
+    high_vis = [(x, y) for x, y, v in foot_data if v > visibility_threshold]
+    if high_vis:
+        xs, ys = zip(*high_vis, strict=False)
+        return (float(np.mean(xs)), float(np.mean(ys)))
+
+    # Tier 2: Use landmarks with any reasonable visibility (> 0.1)
+    low_vis = [(x, y) for x, y, v in foot_data if v > 0.1]
+    if low_vis:
+        xs, ys = zip(*low_vis, strict=False)
+        return (float(np.mean(xs)), float(np.mean(ys)))
+
+    # Tier 3: Use highest visibility landmark regardless of threshold
+    best = max(foot_data, key=lambda t: t[2])
+    return (best[0], best[1])
 
 
 def _calculate_average_visibility(
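
The tiered fallback matters when tracking confidence dips: instead of snapping to frame center and triggering spurious phase transitions, the function degrades gracefully. A usage sketch with hypothetical landmark values (the key names are illustrative stand-ins for the package's FOOT_KEYS):

from kinemotion.dj.analysis import compute_average_foot_position

# Every visibility is below the 0.5 default, so Tier 1 is empty;
# Tier 2 averages the two landmarks above 0.1 and (0.5, 0.5) is avoided.
landmarks = {
    "left_ankle":  (0.42, 0.88, 0.30),
    "right_ankle": (0.46, 0.87, 0.25),
    "left_heel":   (0.41, 0.90, 0.05),  # below 0.1, excluded from Tier 2
}
x, y = compute_average_foot_position(landmarks)
# x = (0.42 + 0.46) / 2 = 0.44, y = (0.88 + 0.87) / 2 = 0.875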
@@ -52,6 +52,14 @@ from .debug_overlay import DropJumpDebugOverlayRenderer
 from .kinematics import DropJumpMetrics, calculate_drop_jump_metrics
 from .metrics_validator import DropJumpMetricsValidator
 
+__all__ = [
+    "AnalysisOverrides",
+    "DropJumpVideoConfig",
+    "DropJumpVideoResult",
+    "process_dropjump_video",
+    "process_dropjump_videos_bulk",
+]
+
 
 @dataclass
 class AnalysisOverrides:
@@ -234,6 +234,16 @@ def _identify_main_contact_phase(
 ) -> tuple[int, int, bool]:
     """Identify the main contact phase and determine if it's a drop jump.
 
+    Drop jump detection strategy:
+    1. With position-based filtering, box period is classified as IN_AIR
+    2. Pattern: IN_AIR(box+drop) → ON_GROUND(contact) → IN_AIR(flight) → ON_GROUND(land)
+    3. The FIRST ground phase is the contact phase (before the flight)
+    4. The LAST ground phase is the landing (after the flight)
+
+    The key differentiator from regular jump:
+    - Drop jump: starts with IN_AIR, has 2+ ground phases with air between them
+    - Regular jump: starts with ON_GROUND, may have multiple phases
+
     Args:
         phases: All phase tuples
         ground_phases: Ground phases with indices
@@ -247,34 +257,43 @@
     contact_start, contact_end = ground_phases[0][0], ground_phases[0][1]
     is_drop_jump = False
 
-    # Detect if this is a drop jump or regular jump
+    # Check if this looks like a drop jump pattern:
+    # Pattern: starts with IN_AIR → ON_GROUND → IN_AIR → ON_GROUND
     if air_phases_indexed and len(ground_phases) >= 2:
+        _, _, first_air_idx = air_phases_indexed[0]
         first_ground_start, first_ground_end, first_ground_idx = ground_phases[0]
-        first_air_idx = air_phases_indexed[0][2]
-
-        # Find ground phase after first air phase
-        ground_after_air = [
-            (start, end, idx) for start, end, idx in ground_phases if idx > first_air_idx
-        ]
-
-        if ground_after_air and first_ground_idx < first_air_idx:
-            # Check if first ground is at higher elevation (lower y) than
-            # ground after air using robust temporal averaging
-            first_ground_y = _compute_robust_phase_position(
-                foot_y_positions, first_ground_start, first_ground_end
-            )
-            second_ground_start, second_ground_end, _ = ground_after_air[0]
-            second_ground_y = _compute_robust_phase_position(
-                foot_y_positions, second_ground_start, second_ground_end
-            )
-
-            # If first ground is significantly higher (>7% of frame), it's a drop jump
-            # Increased from 0.05 to 0.07 with 11-frame temporal averaging
-            # for reproducibility (balances detection sensitivity with noise robustness)
-            # Note: MediaPipe has inherent non-determinism (Google issue #3945)
-            if second_ground_y - first_ground_y > 0.07:
+
+        # Drop jump pattern: first phase is IN_AIR (athlete on box/dropping)
+        # followed by ground contact, then flight, then landing
+        if first_air_idx == 0 and first_ground_idx == 1:
+            # First phase is air (box + drop), second phase is ground (contact)
+            # Check if there's a flight phase after contact
+            air_after_contact = [
+                (s, e, i) for s, e, i in air_phases_indexed if i > first_ground_idx
+            ]
+            if air_after_contact:
+                # This is a drop jump: first ground = contact, last ground = landing
                 is_drop_jump = True
-                contact_start, contact_end = second_ground_start, second_ground_end
+                contact_start, contact_end = first_ground_start, first_ground_end
+
+        # Legacy detection: first ground is on elevated box (lower y)
+        # This handles cases where box level IS detected as ground
+        if not is_drop_jump and first_ground_idx < first_air_idx:
+            ground_after_air = [
+                (start, end, idx) for start, end, idx in ground_phases if idx > first_air_idx
+            ]
+            if ground_after_air:
+                first_ground_y = _compute_robust_phase_position(
+                    foot_y_positions, first_ground_start, first_ground_end
+                )
+                second_ground_start, second_ground_end, _ = ground_after_air[0]
+                second_ground_y = _compute_robust_phase_position(
+                    foot_y_positions, second_ground_start, second_ground_end
+                )
+                # If first ground is significantly higher (>7% of frame), it's a drop jump
+                if second_ground_y - first_ground_y > 0.07:
+                    is_drop_jump = True
+                    contact_start, contact_end = second_ground_start, second_ground_end
 
     if not is_drop_jump:
         # Regular jump: use longest ground contact phase
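
A worked example of the new pattern check, using a simplified stand-in for the package's ContactState and made-up frame numbers:

from enum import Enum

class State(Enum):  # stand-in for ContactState
    IN_AIR = 0
    ON_GROUND = 1

# (start_frame, end_frame, state) per phase, indexed in temporal order
phases = [
    (0, 45, State.IN_AIR),       # idx 0: on box + drop
    (46, 58, State.ON_GROUND),   # idx 1: reactive contact  <- selected
    (59, 74, State.IN_AIR),      # idx 2: flight
    (75, 120, State.ON_GROUND),  # idx 3: landing
]
ground = [(s, e, i) for i, (s, e, st) in enumerate(phases) if st is State.ON_GROUND]
air = [(s, e, i) for i, (s, e, st) in enumerate(phases) if st is State.IN_AIR]

first_air_idx = air[0][2]                            # 0
first_g_start, first_g_end, first_g_idx = ground[0]  # (46, 58, 1)
is_drop_jump = (
    first_air_idx == 0
    and first_g_idx == 1
    and any(i > first_g_idx for _, _, i in air)      # flight exists after contact
)
assert is_drop_jump  # contact phase = frames 46-58, landing starts at frame 75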
@@ -317,6 +336,30 @@ def _find_precise_phase_timing(
     return contact_start_frac, contact_end_frac
 
 
+def _find_landing_from_phases(
+    phases: list[tuple[int, int, ContactState]],
+    flight_start: int,
+) -> int | None:
+    """Find landing frame from phase detection.
+
+    Looks for the first ON_GROUND phase that starts after the flight_start frame.
+    This represents the first ground contact after the reactive jump.
+
+    Args:
+        phases: List of (start, end, state) phase tuples
+        flight_start: Frame where flight begins (takeoff)
+
+    Returns:
+        Landing frame (start of landing phase), or None if not found
+    """
+    for start, _, state in phases:
+        if state == ContactState.ON_GROUND and start > flight_start:
+            # Found the landing phase - return its start frame
+            return start
+
+    return None
+
+
 def _analyze_flight_phase(
     metrics: DropJumpMetrics,
     phases: list[tuple[int, int, ContactState]],
@@ -345,22 +388,20 @@
     # Find takeoff frame (end of ground contact)
     flight_start = contact_end
 
-    # Compute accelerations for landing detection
-    accelerations = compute_acceleration_from_derivative(
-        foot_y_positions, window_length=smoothing_window, polyorder=polyorder
-    )
+    # Use phase detection for landing (more accurate than position-based)
+    # Find the next ON_GROUND phase after the flight phase
+    flight_end = _find_landing_from_phases(phases, flight_start)
 
-    # Use acceleration-based landing detection (like CMJ)
-    # This finds the actual ground impact, not just when velocity drops
-    flight_end = find_landing_from_acceleration(
-        foot_y_positions, accelerations, flight_start, fps, search_duration=0.7
-    )
-
-    # Store integer frame indices
-    metrics.flight_start_frame = flight_start
-    metrics.flight_end_frame = flight_end
+    # If phase detection fails, fall back to position-based detection
+    if flight_end is None:
+        accelerations = compute_acceleration_from_derivative(
+            foot_y_positions, window_length=smoothing_window, polyorder=polyorder
+        )
+        flight_end = find_landing_from_acceleration(
+            foot_y_positions, accelerations, flight_start, fps
+        )
 
-    # Find precise sub-frame timing for takeoff
+    # Find precise sub-frame timing for takeoff and landing
     flight_start_frac = float(flight_start)
     flight_end_frac = float(flight_end)
@@ -373,6 +414,20 @@
             flight_start_frac = end_frac
             break
 
+    # Find interpolated landing (start of landing ON_GROUND phase)
+    for start_frac, _, state in interpolated_phases:
+        if state == ContactState.ON_GROUND and int(start_frac) >= flight_end - 2:
+            flight_end_frac = start_frac
+            break
+
+    # Refine landing frame using floor of interpolated value
+    # This compensates for velocity-based detection being ~1-2 frames late
+    refined_flight_end = int(np.floor(flight_end_frac))
+
+    # Store integer frame indices (refined using interpolated values)
+    metrics.flight_start_frame = flight_start
+    metrics.flight_end_frame = refined_flight_end
+
     # Calculate flight time
     flight_frames_precise = flight_end_frac - flight_start_frac
     metrics.flight_time = flight_frames_precise / fps
@@ -497,15 +552,22 @@ def calculate_drop_jump_metrics(
         phases, ground_phases, air_phases_indexed, foot_y_positions
     )
 
-    # Store integer frame indices
-    metrics.contact_start_frame = contact_start
-    metrics.contact_end_frame = contact_end
-
-    # Find precise timing for contact phase
+    # Find precise timing for contact phase (uses curvature refinement)
     contact_start_frac, contact_end_frac = _find_precise_phase_timing(
         contact_start, contact_end, interpolated_phases
     )
 
+    # Refine contact_start using floor of interpolated value
+    # This compensates for velocity-based detection being ~1-2 frames late
+    # because velocity settles AFTER initial impact. Using floor() biases
+    # toward earlier detection, matching the moment of first ground contact.
+    refined_contact_start = int(np.floor(contact_start_frac))
+
+    # Store integer frame indices (refined start, raw end)
+    # Contact end (takeoff) uses raw value as velocity-based detection is accurate
+    metrics.contact_start_frame = refined_contact_start
+    metrics.contact_end_frame = contact_end
+
     # Calculate ground contact time
     contact_frames_precise = contact_end_frac - contact_start_frac
     metrics.ground_contact_time = contact_frames_precise / fps
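
To make the floor() refinement concrete, here is the arithmetic on hypothetical interpolated boundaries for a 60 fps clip:

import numpy as np

fps = 60.0
contact_start_frac, contact_end_frac = 120.6, 135.3  # hypothetical sub-frame boundaries

# Reported integer frames: floor biases the start toward first contact,
# while the end (takeoff) keeps the raw frame from phase detection
refined_contact_start = int(np.floor(contact_start_frac))  # 120
contact_end = 135                                          # raw integer frame

# The reported time still uses the precise fractional span
ground_contact_time = (contact_end_frac - contact_start_frac) / fps
# (135.3 - 120.6) / 60 = 14.7 / 60 = 0.245 s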
@@ -14,7 +14,7 @@ from kinemotion.core.validation import (
     MetricsValidator,
     ValidationResult,
 )
-from kinemotion.dropjump.validation_bounds import (
+from kinemotion.dj.validation_bounds import (
     DropJumpBounds,
     estimate_athlete_profile,
 )
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.71.1
+Version: 0.72.0
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -1,6 +1,6 @@
-kinemotion/__init__.py,sha256=HkD8habCcfxGobxZcACOStla-L1nYHMIZp0th00Q3E8,1061
-kinemotion/api.py,sha256=uG1e4bTnj2c-6cbZJEZ_LjMwFdaG32ba2KcK_XjE_NI,1040
-kinemotion/cli.py,sha256=_Us9krSce4GUKtlLIPrFUhKmPWURzeJ1-ydR_YU2VGw,626
+kinemotion/__init__.py,sha256=jlhHJlxZJbp7CPfRWMlxwfDISBCdHadjigmXvcahXxU,1055
+kinemotion/api.py,sha256=1GMi_7SbU3EyDdDRtnF55roB8iMR0t91qGOkqa4E6sI,1028
+kinemotion/cli.py,sha256=ugrc1Dpx7abYGEyIDcslzNu8KR0VoOoGGUGUm87E_kQ,620
 kinemotion/cmj/__init__.py,sha256=SkAw9ka8Yd1Qfv9hcvk22m3EfucROzYrSNGNF5kDzho,113
 kinemotion/cmj/analysis.py,sha256=EQydClIbNkIj-FmCZGaPQe-COVW8fbO3139i9z1vomA,23643
 kinemotion/cmj/api.py,sha256=P_lbqEqAKPO5n1Xn4IQZKNj9nLaO3ljkN2PgqvExGXU,18435
@@ -11,7 +11,7 @@ kinemotion/cmj/kinematics.py,sha256=KwA8uSj3g1SeNf0NXMSHsp3gIw6Gfa-6QWIwdYdRXYw,
 kinemotion/cmj/metrics_validator.py,sha256=IQofafpwLCXER3ucZXNfiJKFFKPOVxXnC4BNLHOMnNY,30013
 kinemotion/cmj/validation_bounds.py,sha256=-0iXDhH-RntiGZi_Co22V6qtA5D-hLzkrPkVcfoNd2U,11343
 kinemotion/core/__init__.py,sha256=8hMvfNK7v_eqswuk_J5s5FRGvPtp2-R4kasVMGchFkM,1766
-kinemotion/core/auto_tuning.py,sha256=dF2opupuphbTd6sZIDyXX8hwedLaNlMiH-hT7PGqnfU,10251
+kinemotion/core/auto_tuning.py,sha256=rliPTLueMbOjYRb4hjb0af7DVMtxLT92wpnVve75GvA,10478
 kinemotion/core/cli_utils.py,sha256=sQPbT6XWWau-sm9yuN5c3eS5xNzoQGGXwSz6hQXtRvM,1859
 kinemotion/core/debug_overlay_utils.py,sha256=QaVkHuFZpXUrdiMlm8ylQn6baJOj8jcZeiV4kDqODt0,17441
 kinemotion/core/determinism.py,sha256=Frw-KAOvAxTL_XtxoWpXCjMbQPUKEAusK6JctlkeuRo,2509
@@ -21,7 +21,7 @@ kinemotion/core/formatting.py,sha256=G_3eqgOtym9RFOZVEwCxye4A2cyrmgvtQ214vIshowU
 kinemotion/core/metadata.py,sha256=bJAVa4nym__zx1hNowSZduMGKBSGOPxTbBQkjm6N0D0,7207
 kinemotion/core/model_downloader.py,sha256=mqhJBHGaNe0aN9qbcBqvcTk9FDd7xaHqEcwD-fyP89c,5205
 kinemotion/core/overlay_constants.py,sha256=zZreHHWe00p2XuCJsbRFqN6g-AAUAnx53LwKqHm1Bl8,1438
-kinemotion/core/pipeline_utils.py,sha256=B5jMXoiLaTh02uGA2MIe1uZLVSRGZ5nxbARuvdrjDrQ,15161
+kinemotion/core/pipeline_utils.py,sha256=FzfdKNhM0eK9Y5wbNP9Jab_nmrZxcJfL3cstpO4yfxc,15155
 kinemotion/core/pose.py,sha256=Z795p0EnaTUeWHO8FuApFcMGTLwZ47JOjs5f5TzRvdk,14224
 kinemotion/core/pose_landmarks.py,sha256=LcEbL5K5xKia6dCzWf6Ft18UIE1CLMMqCZ3KUjwUDzM,1558
 kinemotion/core/quality.py,sha256=VUkRL2N6B7lfIZ2pE9han_U68JwarmZz1U0ygHkgkhE,13022
@@ -30,21 +30,21 @@ kinemotion/core/timing.py,sha256=ITX77q4hbtajRuWfgwYhws8nCvOeKFlEdKjCu8lD9_w,793
 kinemotion/core/types.py,sha256=m141buSkEsqflt5VFaTHtRq_IcimjI3_T_EfaNpIVxY,1652
 kinemotion/core/validation.py,sha256=rrhpI24Iq8WGtNaMg0beTWMbEGccdKF-f-pk-FCKJzI,6749
 kinemotion/core/video_io.py,sha256=84IxC1n3HvYK28MSa5fqumdzlPDhP8k9IPB3OCvWku0,9198
-kinemotion/dropjump/__init__.py,sha256=yBbEbPdY6sqozWtTvfbvuUZnrVWSSjBp61xK34M29F4,878
-kinemotion/dropjump/analysis.py,sha256=Tc41jVctG4zJZOyYqM1SiM95mnF2xz4vcieGJ6vYi2M,29099
-kinemotion/dropjump/api.py,sha256=5qBj05e6Zo-H4-UjBOIt_CYyDqLVcPhwyyLG04eJYMU,20639
-kinemotion/dropjump/cli.py,sha256=FaBX637x7VcLcB8HupaZCkVS7sp8C0YuaKM0h-DBNIA,15906
-kinemotion/dropjump/debug_overlay.py,sha256=X4mvCi5Qi1gnvSZZAsUs-0ZRUx9mVBbEUznOFO21HO8,8470
-kinemotion/dropjump/kinematics.py,sha256=dx4PuXKfKMKcsc_HX6sXj8rHXf9ksiZIOAIkJ4vBlY4,19637
-kinemotion/dropjump/metrics_validator.py,sha256=lSfo4Lm5FHccl8ijUP6SA-kcSh50LS9hF8UIyWxcnW8,9243
-kinemotion/dropjump/validation_bounds.py,sha256=k31qy-kCXTiCTx0RPo2t8yZ-faLxqGO-AeF05QfBFb0,5125
+kinemotion/dj/__init__.py,sha256=yBbEbPdY6sqozWtTvfbvuUZnrVWSSjBp61xK34M29F4,878
+kinemotion/dj/analysis.py,sha256=dR5Dqxo_ub9EAOR95oPI4oJKtIofSH0EodopuoywsO8,33339
+kinemotion/dj/api.py,sha256=v-T-VurOoOIAWVyfR5IUCnUc4bHjBuxB2pP8qJG7TLs,20799
+kinemotion/dj/cli.py,sha256=FaBX637x7VcLcB8HupaZCkVS7sp8C0YuaKM0h-DBNIA,15906
+kinemotion/dj/debug_overlay.py,sha256=X4mvCi5Qi1gnvSZZAsUs-0ZRUx9mVBbEUznOFO21HO8,8470
+kinemotion/dj/kinematics.py,sha256=1K291z-PeJTqJbJDeIKWat90mVbxxh4B4hjz-nTFk88,22618
+kinemotion/dj/metrics_validator.py,sha256=BZbqareRaIfCcehTUvNPO3xzkq4X27xDD867e_w7Fmo,9237
+kinemotion/dj/validation_bounds.py,sha256=k31qy-kCXTiCTx0RPo2t8yZ-faLxqGO-AeF05QfBFb0,5125
 kinemotion/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 kinemotion/models/pose_landmarker_lite.task,sha256=WZKeHR7pUodzXd2DOxnPSsRtKbx6_du_Z1PEWWkNV0o,5777746
 kinemotion/models/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.onnx,sha256=dfZTq8kbhv8RxWiXS0HUIJNCUpxYTBN45dFIorPflEs,133
 kinemotion/models/yolox_tiny_8xb8-300e_humanart-6f3252f9.onnx,sha256=UsutHVQ6GP3X5pCcp52EN8q7o2J3d-TnxZqlF48kY6I,133
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.71.1.dist-info/METADATA,sha256=D2ZS9pNSi3NAaSkYKoQ09PZaJ-B3qWDHPLChu3oxdbY,26125
-kinemotion-0.71.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-kinemotion-0.71.1.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.71.1.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.71.1.dist-info/RECORD,,
+kinemotion-0.72.0.dist-info/METADATA,sha256=_OdCWsVJMVu8s2tebWVeW5XnFTi_aMZYVTw01XWN_p0,26125
+kinemotion-0.72.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+kinemotion-0.72.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.72.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.72.0.dist-info/RECORD,,