kinemotion-0.67.0-py3-none-any.whl → kinemotion-0.68.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

kinemotion/core/rtmpose_wrapper.py ADDED

@@ -0,0 +1,190 @@
+"""RTMPose wrapper for CUDA and CoreML acceleration.
+
+This adapter wraps RTMLib's BodyWithFeet for CUDA (NVIDIA GPU) and CoreML (Apple Silicon)
+acceleration, matching kinemotion's PoseTracker API.
+
+Performance:
+    - CUDA (RTX 4070 Ti Super): 133 FPS (271% of MediaPipe)
+    - CoreML (M1 Pro): 42 FPS (94% of MediaPipe)
+    - Accuracy: 9-12px mean difference from MediaPipe
+"""
+
+from __future__ import annotations
+
+import numpy as np
+from rtmlib import BodyWithFeet
+
+from kinemotion.core.timing import NULL_TIMER, Timer
+
+# Halpe-26 to kinemotion landmark mapping
+HALPE_TO_KINEMOTION = {
+    0: "nose",
+    5: "left_shoulder",
+    6: "right_shoulder",
+    11: "left_hip",
+    12: "right_hip",
+    13: "left_knee",
+    14: "right_knee",
+    15: "left_ankle",
+    16: "right_ankle",
+    20: "left_foot_index",
+    21: "right_foot_index",
+    24: "left_heel",
+    25: "right_heel",
+}
+
+
+class RTMPoseWrapper:
+    """RTMPose wrapper for CUDA/CoreML acceleration.
+
+    Uses RTMLib's BodyWithFeet (Halpe-26 format) which provides all 13
+    kinemotion landmarks including feet.
+
+    Supports:
+    - device='cuda': NVIDIA GPU acceleration (fastest)
+    - device='mps': Apple Silicon CoreML acceleration
+    - device='cpu': Fallback (unoptimized, use rtmpose_cpu.py instead)
+
+    Attributes:
+        timer: Optional Timer for measuring operations
+        estimator: RTMLib BodyWithFeet estimator instance
+        mode: RTMLib mode ('lightweight', 'balanced', 'performance')
+        device: Target device ('cuda', 'mps', 'cpu')
+    """
+
+    def __init__(
+        self,
+        min_detection_confidence: float = 0.5,
+        min_tracking_confidence: float = 0.5,
+        timer: Timer | None = None,
+        mode: str = "lightweight",
+        backend: str = "onnxruntime",
+        device: str = "cpu",
+        pose_input_size: tuple[int, int] | None = None,
+    ) -> None:
+        """Initialize the RTMPose wrapper.
+
+        Args:
+            min_detection_confidence: Minimum confidence for pose detection
+            min_tracking_confidence: Minimum confidence for pose tracking
+            timer: Optional Timer for measuring operations
+            mode: RTMLib performance mode:
+                - 'lightweight': Fastest, RTMPose-s
+                - 'balanced': Default mode
+                - 'performance': Best accuracy, RTMPose-m
+            backend: RTMLib backend ('onnxruntime', 'opencv')
+            device: RTMLib device ('cpu', 'cuda', 'mps')
+            pose_input_size: Custom input size as (height, width) tuple
+        """
+        self.timer = timer or NULL_TIMER
+        self.mode = mode
+        self.backend = backend
+        self.device = device
+
+        with self.timer.measure("rtmpose_wrapper_initialization"):
+            kwargs = {
+                "mode": mode,
+                "backend": backend,
+                "device": device,
+            }
+            if pose_input_size is not None:
+                kwargs["pose_input_size"] = pose_input_size  # type: ignore[assignment]
+            self.estimator = BodyWithFeet(**kwargs)  # type: ignore[arg-type]
+
+    def process_frame(
+        self, frame: np.ndarray, timestamp_ms: int = 0
+    ) -> dict[str, tuple[float, float, float]] | None:
+        """Process a single frame and extract pose landmarks.
+
+        Args:
+            frame: BGR image frame (OpenCV format)
+            timestamp_ms: Frame timestamp in milliseconds (unused, for API compatibility)
+
+        Returns:
+            Dictionary mapping landmark names to (x, y, visibility) tuples,
+            or None if no pose detected. Coordinates are normalized (0-1).
+        """
+        if frame.size == 0:
+            return None
+
+        height, width = frame.shape[:2]
+
+        # RTMLib expects RGB, but BodyWithFeet handles conversion internally
+        with self.timer.measure("rtmpose_inference"):
+            keypoints, scores = self.estimator(frame)
+
+        if keypoints.shape[0] == 0:
+            return None
+
+        # Extract first person's keypoints
+        with self.timer.measure("landmark_extraction"):
+            landmarks = self._extract_landmarks(keypoints[0], scores[0], width, height)
+
+        return landmarks
+
+    def _extract_landmarks(
+        self,
+        keypoints: np.ndarray,
+        scores: np.ndarray,
+        img_width: int,
+        img_height: int,
+    ) -> dict[str, tuple[float, float, float]]:
+        """Extract and convert RTMLib landmarks to MediaPipe format.
+
+        Args:
+            keypoints: (26, 2) array of pixel coordinates
+            scores: (26,) array of confidence scores
+            img_width: Image width for normalization
+            img_height: Image height for normalization
+
+        Returns:
+            Dictionary mapping kinemotion landmark names to normalized
+            (x, y, visibility) tuples.
+        """
+        landmarks = {}
+
+        for halpe_idx, name in HALPE_TO_KINEMOTION.items():
+            x_pixel, y_pixel = keypoints[halpe_idx]
+            confidence = float(scores[halpe_idx])
+
+            # Normalize to [0, 1] like MediaPipe
+            x_norm = float(x_pixel / img_width)
+            y_norm = float(y_pixel / img_height)
+
+            # Clamp to valid range
+            x_norm = max(0.0, min(1.0, x_norm))
+            y_norm = max(0.0, min(1.0, y_norm))
+
+            # Use confidence as visibility (MediaPipe compatibility)
+            landmarks[name] = (x_norm, y_norm, confidence)
+
+        return landmarks
+
+    def close(self) -> None:
+        """Release resources (no-op for RTMLib)."""
+        pass
+
+
+def create_rtmpose_wrapper(
+    device: str = "cpu",
+    mode: str = "lightweight",
+    timer: Timer | None = None,
+) -> RTMPoseWrapper:
+    """Factory function to create an RTMPose wrapper.
+
+    Args:
+        device: Target device ('cuda', 'mps', 'cpu')
+        mode: Performance mode ('lightweight', 'balanced', 'performance')
+        timer: Optional Timer for measuring operations
+
+    Returns:
+        Configured RTMPoseWrapper instance
+
+    Example:
+        # CUDA (NVIDIA GPU)
+        tracker = create_rtmpose_wrapper(device='cuda')
+
+        # CoreML (Apple Silicon)
+        tracker = create_rtmpose_wrapper(device='mps')
+    """
+    return RTMPoseWrapper(device=device, mode=mode, timer=timer)
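
For context, a minimal end-to-end sketch of the adapter API added above; the OpenCV capture loop and the video filename are illustrative assumptions, not part of the package:

import cv2

from kinemotion.core.rtmpose_wrapper import create_rtmpose_wrapper

tracker = create_rtmpose_wrapper(device="cuda", mode="lightweight")
cap = cv2.VideoCapture("jump.mp4")  # hypothetical input video
try:
    while True:
        ok, frame = cap.read()  # BGR frame, as process_frame() expects
        if not ok:
            break
        landmarks = tracker.process_frame(frame)
        if landmarks is not None:
            # Normalized (x, y, visibility) tuple for any mapped landmark
            x, y, visibility = landmarks["left_ankle"]
finally:
    cap.release()
    tracker.close()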
kinemotion/core/timing.py CHANGED
@@ -21,7 +21,8 @@ Example:
     metrics = timer.get_metrics()

     # Zero-overhead timing (disabled)
-    tracker = PoseTracker(timer=NULL_TIMER)
+    from kinemotion.core import PoseTrackerFactory
+    tracker = PoseTrackerFactory.create(timer=NULL_TIMER)

     # No timing overhead, but maintains API compatibility
 """
@@ -102,7 +103,8 @@ class NullTimer:

     Example:
         # Use global singleton for zero allocation overhead
-        tracker = PoseTracker(timer=NULL_TIMER)
+        from kinemotion.core import PoseTrackerFactory
+        tracker = PoseTrackerFactory.create(timer=NULL_TIMER)

         # No overhead - measure() call optimizes to nothing
         with tracker.timer.measure("operation"):
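
The docstring updates above reflect the new factory entry point. A short sketch of the two timing modes, assuming only the names shown in these docstrings (PoseTrackerFactory, PerformanceTimer, NULL_TIMER):

from kinemotion.core import PoseTrackerFactory
from kinemotion.core.timing import NULL_TIMER, PerformanceTimer

# Enabled: collect per-stage durations and read them back
timer = PerformanceTimer()
tracker = PoseTrackerFactory.create(timer=timer)
with timer.measure("pose_estimation"):
    pass  # ... process frames here ...
print(timer.get_metrics())

# Disabled: the NULL_TIMER singleton keeps the same API but records nothing
tracker = PoseTrackerFactory.create(timer=NULL_TIMER)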
kinemotion/dropjump/api.py CHANGED

@@ -40,7 +40,7 @@ from ..core.pipeline_utils import (
     process_all_frames,
     process_videos_bulk_generic,
 )
-from ..core.pose import PoseTracker
+from ..core.pose import MediaPipePoseTracker
 from ..core.quality import QualityAssessment, assess_jump_quality
 from ..core.timing import NULL_TIMER, PerformanceTimer, Timer
 from ..core.video_io import VideoProcessor
@@ -90,6 +90,7 @@ class DropJumpVideoConfig:
     overrides: AnalysisOverrides | None = None
     detection_confidence: float | None = None
     tracking_confidence: float | None = None
+    pose_backend: str | None = None


 def _assess_dropjump_quality(
@@ -217,7 +218,7 @@ def _print_dropjump_summary(
     timer: Timer,
 ) -> None:
     """Print verbose timing summary."""
-    total_time = time.time() - start_time
+    total_time = time.perf_counter() - start_time
     stage_times = convert_timer_to_stage_names(timer.get_metrics())

     print("\n=== Timing Summary ===")
@@ -235,9 +236,11 @@ def _setup_pose_tracker(
     quality_preset: QualityPreset,
     detection_confidence: float | None,
     tracking_confidence: float | None,
-    pose_tracker: "PoseTracker | None",
+    pose_tracker: "MediaPipePoseTracker | None",
+    pose_backend: str | None,
     timer: Timer,
-) -> tuple["PoseTracker", bool]:
+    verbose: bool = False,
+) -> tuple["MediaPipePoseTracker", bool]:
     """Set up pose tracker and determine if it should be closed."""
     detection_conf, tracking_conf = determine_confidence_levels(
         quality_preset, detection_confidence, tracking_confidence
@@ -247,11 +250,29 @@ def _setup_pose_tracker(
     should_close_tracker = False

     if tracker is None:
-        tracker = PoseTracker(
-            min_detection_confidence=detection_conf,
-            min_tracking_confidence=tracking_conf,
-            timer=timer,
-        )
+        if pose_backend is not None:
+            import time
+
+            from ..core import get_tracker_info
+            from ..core.pose import PoseTrackerFactory
+
+            init_start = time.perf_counter()
+            tracker = PoseTrackerFactory.create(
+                backend=pose_backend,
+                timer=timer,
+            )
+            init_time = time.perf_counter() - init_start
+
+            if verbose:
+                print(f"Using pose backend: {pose_backend}")
+                print(f" → {get_tracker_info(tracker)}")
+                print(f" → Initialized in {init_time * 1000:.1f} ms")
+        else:
+            tracker = MediaPipePoseTracker(
+                min_detection_confidence=detection_conf,
+                min_tracking_confidence=tracking_conf,
+                timer=timer,
+            )
         should_close_tracker = True

     return tracker, should_close_tracker
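
The new branch above routes backend selection through the factory. A sketch of that path in isolation, assuming the accepted backend strings match the CLI choices introduced below:

from kinemotion.core import PoseTrackerFactory, get_tracker_info
from kinemotion.core.timing import NULL_TIMER

# "rtmpose-cuda" is one of the CLI-exposed backend names (see --pose-backend below)
tracker = PoseTrackerFactory.create(backend="rtmpose-cuda", timer=NULL_TIMER)
print(get_tracker_info(tracker))  # description of the selected tracker, as printed in verbose mode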
@@ -259,7 +280,7 @@ def _setup_pose_tracker(

 def _process_frames_and_landmarks(
     video: "VideoProcessor",
-    tracker: "PoseTracker",
+    tracker: "MediaPipePoseTracker",
     should_close_tracker: bool,
     verbose: bool,
     timer: Timer,
@@ -487,7 +508,8 @@ def process_dropjump_video(
     tracking_confidence: float | None = None,
     verbose: bool = False,
     timer: Timer | None = None,
-    pose_tracker: "PoseTracker | None" = None,
+    pose_tracker: "MediaPipePoseTracker | None" = None,
+    pose_backend: str | None = None,
 ) -> DropJumpMetrics:
     """
     Process a single drop jump video and return metrics.
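
A sketch of calling the extended API directly; the positional video path is assumed from the surrounding code, and only pose_backend is new in this release:

from kinemotion.dropjump.api import process_dropjump_video

metrics = process_dropjump_video(
    "drop_jump.mp4",  # hypothetical video path
    pose_backend="rtmpose-cuda",  # None keeps the MediaPipe default path
    verbose=True,
)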
@@ -521,7 +543,7 @@ def process_dropjump_video(

     set_deterministic_mode(seed=42)

-    start_time = time.time()
+    start_time = time.perf_counter()
     timer = timer or PerformanceTimer()
     quality_preset = parse_quality_preset(quality)

@@ -532,7 +554,9 @@ def process_dropjump_video(
         detection_confidence,
         tracking_confidence,
         pose_tracker,
+        pose_backend,
         timer,
+        verbose,
     )

     frames, landmarks_sequence, frame_indices = _process_frames_and_landmarks(
@@ -584,7 +608,7 @@ def process_dropjump_video(

     _validate_metrics_and_print_results(metrics, timer, verbose)

-    processing_time = time.time() - start_time
+    processing_time = time.perf_counter() - start_time
     result_metadata = _build_dropjump_metadata(
         video_path,
         video,
@@ -630,7 +654,7 @@ def process_dropjump_videos_bulk(

 def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVideoResult:
     """Wrapper function for parallel processing."""
-    start_time = time.time()
+    start_time = time.perf_counter()

     try:
         metrics = process_dropjump_video(
@@ -645,7 +669,7 @@ def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVideoResult:
             verbose=False,
         )

-        processing_time = time.time() - start_time
+        processing_time = time.perf_counter() - start_time

         return DropJumpVideoResult(
             video_path=config.video_path,
@@ -655,7 +679,7 @@ def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVideoResult:
         )

     except Exception as e:
-        processing_time = time.time() - start_time
+        processing_time = time.perf_counter() - start_time

         return DropJumpVideoResult(
             video_path=config.video_path,
kinemotion/dropjump/cli.py CHANGED

@@ -31,6 +31,7 @@ class AnalysisParameters:
     visibility_threshold: float | None = None
     detection_confidence: float | None = None
     tracking_confidence: float | None = None
+    pose_backend: str | None = None


 @click.command(name="dropjump-analyze")
@@ -65,6 +66,23 @@ class AnalysisParameters:
     is_flag=True,
     help="Show auto-selected parameters and analysis details",
 )
+@click.option(
+    "--pose-backend",
+    type=click.Choice(
+        ["auto", "mediapipe", "rtmpose-cpu", "rtmpose-cuda", "rtmpose-coreml"],
+        case_sensitive=False,
+    ),
+    default="auto",
+    help=(
+        "Pose tracking backend: "
+        "auto (detect best), "
+        "mediapipe (baseline), "
+        "rtmpose-cpu (optimized CPU), "
+        "rtmpose-cuda (NVIDIA GPU), "
+        "rtmpose-coreml (Apple Silicon)"
+    ),
+    show_default=True,
+)
 # Batch processing options
 @click.option(
     "--batch",
@@ -143,6 +161,7 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual
     json_output: str | None,
     quality: str,
     verbose: bool,
+    pose_backend: str,
     batch: bool,
     workers: int,
     output_dir: str | None,
@@ -212,6 +231,7 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual
             json_output_dir,
             csv_summary,
             expert_params,
+            pose_backend,
         )
     else:
         # Single video mode (original behavior)
@@ -222,6 +242,7 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual
             quality,
             verbose,
             expert_params,
+            pose_backend,
         )


@@ -232,6 +253,7 @@ def _process_single(
     quality: str,
     verbose: bool,
     expert_params: AnalysisParameters,
+    pose_backend: str,
 ) -> None:
     """Process a single video by calling the API."""
     click.echo(f"Analyzing video: {video_path}", err=True)
@@ -266,6 +288,7 @@ def _process_single(
         overrides=overrides,
         detection_confidence=expert_params.detection_confidence,
         tracking_confidence=expert_params.tracking_confidence,
+        pose_backend=pose_backend,
         verbose=verbose,
     )

@@ -309,6 +332,7 @@ def _create_video_configs(
     output_dir: str | None,
     json_output_dir: str | None,
     expert_params: AnalysisParameters,
+    pose_backend: str,
 ) -> list[DropJumpVideoConfig]:
     """Build configuration objects for each video.

@@ -356,6 +380,7 @@ def _create_video_configs(
             overrides=overrides,
             detection_confidence=expert_params.detection_confidence,
             tracking_confidence=expert_params.tracking_confidence,
+            pose_backend=expert_params.pose_backend,
         )
         configs.append(config)

@@ -520,6 +545,7 @@ def _process_batch(
     json_output_dir: str | None,
     csv_summary: str | None,
     expert_params: AnalysisParameters,
+    pose_backend: str,
 ) -> None:
     """Process multiple videos in batch mode using parallel processing."""
     click.echo(f"\nBatch processing {len(video_files)} videos with {workers} workers", err=True)
@@ -530,7 +556,7 @@ def _process_batch(

     # Create video configurations
     configs = _create_video_configs(
-        video_files, quality, output_dir, json_output_dir, expert_params
+        video_files, quality, output_dir, json_output_dir, expert_params, pose_backend
     )

     # Progress callback
kinemotion-0.67.0.dist-info/METADATA → kinemotion-0.68.0.dist-info/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.67.0
+Version: 0.68.0
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -23,9 +23,13 @@ Requires-Python: <3.13,>=3.10
 Requires-Dist: click>=8.1.7
 Requires-Dist: mediapipe>=0.10.30
 Requires-Dist: numpy>=1.26.0
+Requires-Dist: onnxruntime-gpu>=1.23.2
 Requires-Dist: opencv-python>=4.9.0
 Requires-Dist: platformdirs>=4.0.0
+Requires-Dist: rtmlib>=0.0.13
 Requires-Dist: scipy>=1.11.0
+Requires-Dist: torch>=2.0.0
+Requires-Dist: tqdm>=4.67.1
 Requires-Dist: typing-extensions>=4.15.0
 Description-Content-Type: text/markdown

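The new onnxruntime-gpu dependency backs the rtmpose-cuda backend. A quick sanity check that CUDA inference is actually available in the installed runtime:

import onnxruntime as ort

# 'CUDAExecutionProvider' should appear here on a working NVIDIA/CUDA install
print(ort.get_available_providers())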
kinemotion-0.67.0.dist-info/RECORD → kinemotion-0.68.0.dist-info/RECORD CHANGED

@@ -1,47 +1,51 @@
 kinemotion/__init__.py,sha256=Ho_BUtsM0PBxBW1ye9RlUg0ZqBlgGudRI9bZTF7QKUI,966
 kinemotion/api.py,sha256=uG1e4bTnj2c-6cbZJEZ_LjMwFdaG32ba2KcK_XjE_NI,1040
-kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
+kinemotion/cli.py,sha256=_Us9krSce4GUKtlLIPrFUhKmPWURzeJ1-ydR_YU2VGw,626
 kinemotion/cmj/__init__.py,sha256=SkAw9ka8Yd1Qfv9hcvk22m3EfucROzYrSNGNF5kDzho,113
 kinemotion/cmj/analysis.py,sha256=jM9ZX44h1__Cg2iIhAYRoo_5fPwIOeV5Q2FZ22rMvKY,22202
-kinemotion/cmj/api.py,sha256=DkrA6xgSp43g2xpT8ai32BvMGo3Bmj23upxhOYGlpXY,18284
-kinemotion/cmj/cli.py,sha256=P2b77IIw6kqTSIkncxlShzhmjIwqMFBNd-pZxYP-TsI,9918
+kinemotion/cmj/api.py,sha256=Pyc0IoFyvBWcLnWq3lV9pn2ZcdFEU8ki_GX1DfXARSU,19417
+kinemotion/cmj/cli.py,sha256=-hNNN7rshrICJ7bG0EfSdEDOPNcGX_CtOZfgrZfatQg,10522
 kinemotion/cmj/debug_overlay.py,sha256=bX9aPLhXiLCCMZW9v8Y4OiOAaZO0i-UGr-Pl8HCsmbI,15810
 kinemotion/cmj/joint_angles.py,sha256=HmheIEiKcQz39cRezk4h-htorOhGNPsqKIR9RsAEKts,9960
 kinemotion/cmj/kinematics.py,sha256=KwA8uSj3g1SeNf0NXMSHsp3gIw6Gfa-6QWIwdYdRXYw,13362
 kinemotion/cmj/metrics_validator.py,sha256=3oFB331Xch2sRMTvqALiwOvsWkCUhrLQ7ZCZ4QhI2lA,30986
 kinemotion/cmj/validation_bounds.py,sha256=Ry915JdInPXbqjaVGNY_urnDO1PAkCSJqHwNKRq-VkU,12048
-kinemotion/core/__init__.py,sha256=KtpEPkvm4kQMh_-ue8AUwT2uOs9gJ1S-45OfhVJfF_E,1685
+kinemotion/core/__init__.py,sha256=8WB7tAJPKOxgNzbhIEOnGnkRr0CcdNeTnz91Jsiyafo,1812
 kinemotion/core/auto_tuning.py,sha256=lhAqPc-eLjMYx9BCvKdECE7TD2Dweb9KcifV6JHaXOE,11278
 kinemotion/core/cli_utils.py,sha256=sQPbT6XWWau-sm9yuN5c3eS5xNzoQGGXwSz6hQXtRvM,1859
-kinemotion/core/debug_overlay_utils.py,sha256=Izwv3LnyRrrNxecwfOD4R6oxKqH3yIee0BaVz-BuIcI,13033
+kinemotion/core/debug_overlay_utils.py,sha256=D4aT8xstThPcV2i5D4KJZJEttW6E_4GE5QiERqe1MwI,13049
 kinemotion/core/determinism.py,sha256=Frw-KAOvAxTL_XtxoWpXCjMbQPUKEAusK6JctlkeuRo,2509
 kinemotion/core/experimental.py,sha256=IK05AF4aZS15ke85hF3TWCqRIXU1AlD_XKzFz735Ua8,3640
 kinemotion/core/filtering.py,sha256=Oc__pV6iHEGyyovbqa5SUi-6v8QyvaRVwA0LRayM884,11355
 kinemotion/core/formatting.py,sha256=G_3eqgOtym9RFOZVEwCxye4A2cyrmgvtQ214vIshowU,2480
 kinemotion/core/metadata.py,sha256=bJAVa4nym__zx1hNowSZduMGKBSGOPxTbBQkjm6N0D0,7207
 kinemotion/core/model_downloader.py,sha256=mqhJBHGaNe0aN9qbcBqvcTk9FDd7xaHqEcwD-fyP89c,5205
-kinemotion/core/pipeline_utils.py,sha256=-LjyN2bMEFM-ED2cl3qGdLD4mjPG6TG9nxXgw9qBYZQ,15134
-kinemotion/core/pose.py,sha256=BODO6EDJZ6n-J9oh-lWJLBzTb7Zai2rpl52yy_l1sLs,12792
+kinemotion/core/pipeline_utils.py,sha256=B5jMXoiLaTh02uGA2MIe1uZLVSRGZ5nxbARuvdrjDrQ,15161
+kinemotion/core/pose.py,sha256=vSdSdV-aoRUEIhvlIW2GvZ-zQspv98_F3uUFQ_Z3nAU,28201
 kinemotion/core/pose_landmarks.py,sha256=LcEbL5K5xKia6dCzWf6Ft18UIE1CLMMqCZ3KUjwUDzM,1558
 kinemotion/core/quality.py,sha256=VUkRL2N6B7lfIZ2pE9han_U68JwarmZz1U0ygHkgkhE,13022
+kinemotion/core/rtmpose_cpu.py,sha256=Mox8Hon3hulyA6uHKUIe2hCR4xinDZfotOOIVncXi2M,22356
+kinemotion/core/rtmpose_wrapper.py,sha256=R5QlZIHJczM1AxvVqF256DcD_LvGvHyjrFnxCsWc7do,6202
 kinemotion/core/smoothing.py,sha256=ELMHL7pzSqYffjnLDBUMBJIgt1AwOssDInE8IiXBbig,15942
-kinemotion/core/timing.py,sha256=d1rjZc07Nbi5Jrio9AC-zeS0dNAlbPyNIydLz7X75Pk,7804
+kinemotion/core/timing.py,sha256=ITX77q4hbtajRuWfgwYhws8nCvOeKFlEdKjCu8lD9_w,7938
 kinemotion/core/types.py,sha256=A_HclzKpf3By5DiJ0wY9B-dQJrIVAAhUfGab7qTSIL8,1279
 kinemotion/core/validation.py,sha256=0xVv-ftWveV60fJ97kmZMuy2Qqqb5aZLR50dDIrjnhg,6773
 kinemotion/core/video_io.py,sha256=TxdLUEpekGytesL3X3k78WWgZTOd5fuge30hU4Uy48Y,9198
 kinemotion/dropjump/__init__.py,sha256=tC3H3BrCg8Oj-db-Vrtx4PH_llR1Ppkd5jwaOjhQcLg,862
 kinemotion/dropjump/analysis.py,sha256=YomuoJF_peyrBSpeT89Q5_sBgY0kEDyq7TFrtEnRLjs,28049
-kinemotion/dropjump/api.py,sha256=uidio49CXisyWKd287CnCrM51GusG9DWAIUKGH85fpM,20584
-kinemotion/dropjump/cli.py,sha256=gUef9nmyR5952h1WnfBGyCdFXQvzVTlCKYAjJGcO4sE,16819
+kinemotion/dropjump/api.py,sha256=0AGA896H97x9GZoO8kX75iBCNCHEEm1lH99yoqJceW8,21522
+kinemotion/dropjump/cli.py,sha256=sJccY6HrRqj6DKERtsUgy__Wmp-pdm5c765EN3ydiec,17562
 kinemotion/dropjump/debug_overlay.py,sha256=9RQYXPRf0q2wdy6y2Ak2R4tpRceDwC8aJrXZzkmh3Wo,5942
 kinemotion/dropjump/kinematics.py,sha256=dx4PuXKfKMKcsc_HX6sXj8rHXf9ksiZIOAIkJ4vBlY4,19637
 kinemotion/dropjump/metrics_validator.py,sha256=lSfo4Lm5FHccl8ijUP6SA-kcSh50LS9hF8UIyWxcnW8,9243
 kinemotion/dropjump/validation_bounds.py,sha256=x4yjcFxyvdMp5e7MkcoUosGLeGsxBh1Lft6h__AQ2G8,5124
 kinemotion/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 kinemotion/models/pose_landmarker_lite.task,sha256=WZKeHR7pUodzXd2DOxnPSsRtKbx6_du_Z1PEWWkNV0o,5777746
+kinemotion/models/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.onnx,sha256=hcXWaaoLDHlpeZ6p4GINO4wdPCtoeCQ-mKHvg1FFeYY,22793379
+kinemotion/models/yolox_tiny_8xb8-300e_humanart-6f3252f9.onnx,sha256=zrEcBymPlcUNfFq-uQbQM0DIXyOqeePmaWbn-2wwclA,20283006
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.67.0.dist-info/METADATA,sha256=ObxX6XYADlJtpS6I2b6pylCUZLsU65vSAyG9fH3N1d4,26097
-kinemotion-0.67.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-kinemotion-0.67.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
-kinemotion-0.67.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
-kinemotion-0.67.0.dist-info/RECORD,,
+kinemotion-0.68.0.dist-info/METADATA,sha256=-hJON9AMNej7r9m1PH4rTdEQd1VJ03TqJxMapeadA9o,26222
+kinemotion-0.68.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+kinemotion-0.68.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.68.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.68.0.dist-info/RECORD,,