kinemotion-0.43.0-py3-none-any.whl → kinemotion-0.45.0-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

This version of kinemotion has been flagged as potentially problematic.

@@ -0,0 +1,429 @@
+ """Shared pipeline utilities for kinematic analysis."""
+
+ from collections.abc import Callable
+ from concurrent.futures import ProcessPoolExecutor, as_completed
+ from typing import TypeVar
+
+ import cv2
+ import numpy as np
+
+ from ..cmj.analysis import compute_average_hip_position
+ from ..dropjump.analysis import compute_average_foot_position
+ from .auto_tuning import AnalysisParameters, QualityPreset, VideoCharacteristics
+ from .pose import PoseTracker
+ from .smoothing import smooth_landmarks, smooth_landmarks_advanced
+ from .timing import PerformanceTimer
+ from .video_io import VideoProcessor
+
+ TResult = TypeVar("TResult")
+ TConfig = TypeVar("TConfig")
+
+
+ def parse_quality_preset(quality: str) -> QualityPreset:
+     """Parse and validate quality preset string.
+
+     Args:
+         quality: Quality preset string ('fast', 'balanced', or 'accurate')
+
+     Returns:
+         QualityPreset enum value
+
+     Raises:
+         ValueError: If quality preset is invalid
+     """
+     try:
+         return QualityPreset(quality.lower())
+     except ValueError as e:
+         raise ValueError(
+             f"Invalid quality preset: {quality}. "
+             "Must be 'fast', 'balanced', or 'accurate'"
+         ) from e
+
+
+ def determine_confidence_levels(
+     quality_preset: QualityPreset,
+     detection_confidence: float | None,
+     tracking_confidence: float | None,
+ ) -> tuple[float, float]:
+     """Determine detection and tracking confidence levels.
+
+     Args:
+         quality_preset: Quality preset enum
+         detection_confidence: Optional expert override for detection confidence
+         tracking_confidence: Optional expert override for tracking confidence
+
+     Returns:
+         Tuple of (detection_confidence, tracking_confidence)
+     """
+     initial_detection_conf = 0.5
+     initial_tracking_conf = 0.5
+
+     if quality_preset == QualityPreset.FAST:
+         initial_detection_conf = 0.3
+         initial_tracking_conf = 0.3
+     elif quality_preset == QualityPreset.ACCURATE:
+         initial_detection_conf = 0.6
+         initial_tracking_conf = 0.6
+
+     if detection_confidence is not None:
+         initial_detection_conf = detection_confidence
+     if tracking_confidence is not None:
+         initial_tracking_conf = tracking_confidence
+
+     return initial_detection_conf, initial_tracking_conf
+
+
+ def apply_expert_overrides(
+     params: AnalysisParameters,
+     smoothing_window: int | None,
+     velocity_threshold: float | None,
+     min_contact_frames: int | None,
+     visibility_threshold: float | None,
+ ) -> AnalysisParameters:
+     """Apply expert parameter overrides to auto-tuned parameters.
+
+     Args:
+         params: Auto-tuned parameters object
+         smoothing_window: Optional override for smoothing window
+         velocity_threshold: Optional override for velocity threshold
+         min_contact_frames: Optional override for minimum contact frames
+         visibility_threshold: Optional override for visibility threshold
+
+     Returns:
+         Modified params object (mutated in place)
+     """
+     if smoothing_window is not None:
+         params.smoothing_window = smoothing_window
+     if velocity_threshold is not None:
+         params.velocity_threshold = velocity_threshold
+     if min_contact_frames is not None:
+         params.min_contact_frames = min_contact_frames
+     if visibility_threshold is not None:
+         params.visibility_threshold = visibility_threshold
+     return params
+
+
+ def print_verbose_parameters(
+     video: VideoProcessor,
+     characteristics: VideoCharacteristics,
+     quality_preset: QualityPreset,
+     params: AnalysisParameters,
+ ) -> None:
+     """Print auto-tuned parameters in verbose mode.
+
+     Args:
+         video: Video processor with fps and dimensions
+         characteristics: Video analysis characteristics
+         quality_preset: Selected quality preset
+         params: Auto-tuned parameters
+     """
+     print("\n" + "=" * 60)
+     print("AUTO-TUNED PARAMETERS")
+     print("=" * 60)
+     print(f"Video FPS: {video.fps:.2f}")
+     print(
+         f"Tracking quality: {characteristics.tracking_quality} "
+         f"(avg visibility: {characteristics.avg_visibility:.2f})"
+     )
+     print(f"Quality preset: {quality_preset.value}")
+     print("\nSelected parameters:")
+     print(f" smoothing_window: {params.smoothing_window}")
+     print(f" polyorder: {params.polyorder}")
+     print(f" velocity_threshold: {params.velocity_threshold:.4f}")
+     print(f" min_contact_frames: {params.min_contact_frames}")
+     print(f" visibility_threshold: {params.visibility_threshold}")
+     print(f" detection_confidence: {params.detection_confidence}")
+     print(f" tracking_confidence: {params.tracking_confidence}")
+     print(f" outlier_rejection: {params.outlier_rejection}")
+     print(f" bilateral_filter: {params.bilateral_filter}")
+     print(f" use_curvature: {params.use_curvature}")
+     print("=" * 60 + "\n")
+
+
+ def process_all_frames(
+     video: VideoProcessor,
+     tracker: PoseTracker,
+     verbose: bool,
+     timer: PerformanceTimer | None = None,
+     close_tracker: bool = True,
+     target_debug_fps: float = 30.0,
+     max_debug_dim: int = 720,
+ ) -> tuple[list, list, list]:
+     """Process all frames from video and extract pose landmarks.
+
+     Args:
+         video: Video processor to read frames from
+         tracker: Pose tracker for landmark detection
+         verbose: Print progress messages
+         timer: Optional PerformanceTimer for measuring operations
+         close_tracker: Whether to close the tracker after processing (default: True)
+         target_debug_fps: Target FPS for debug video (default: 30.0)
+         max_debug_dim: Max dimension for debug video frames (default: 720)
+
+     Returns:
+         Tuple of (debug_frames, landmarks_sequence, frame_indices)
+
+     Raises:
+         ValueError: If no frames could be processed
+     """
+     if verbose:
+         print("Tracking pose landmarks...")
+
+     landmarks_sequence = []
+     debug_frames = []
+     frame_indices = []
+
+     step = max(1, int(video.fps / target_debug_fps))
+
+     w, h = video.display_width, video.display_height
+     scale = 1.0
+     if max(w, h) > max_debug_dim:
+         scale = max_debug_dim / max(w, h)
+
+     debug_w = int(w * scale) // 2 * 2
+     debug_h = int(h * scale) // 2 * 2
+     should_resize = (debug_w != video.width) or (debug_h != video.height)
+
+     frame_idx = 0
+
+     if timer:
+         with timer.measure("pose_tracking"):
+             while True:
+                 frame = video.read_frame()
+                 if frame is None:
+                     break
+
+                 landmarks = tracker.process_frame(frame)
+                 landmarks_sequence.append(landmarks)
+
+                 if frame_idx % step == 0:
+                     if should_resize:
+                         processed_frame = cv2.resize(
+                             frame, (debug_w, debug_h), interpolation=cv2.INTER_LINEAR
+                         )
+                     else:
+                         processed_frame = frame
+
+                     debug_frames.append(processed_frame)
+                     frame_indices.append(frame_idx)
+
+                 frame_idx += 1
+     else:
+         while True:
+             frame = video.read_frame()
+             if frame is None:
+                 break
+
+             landmarks = tracker.process_frame(frame)
+             landmarks_sequence.append(landmarks)
+
+             if frame_idx % step == 0:
+                 if should_resize:
+                     processed_frame = cv2.resize(
+                         frame, (debug_w, debug_h), interpolation=cv2.INTER_LINEAR
+                     )
+                 else:
+                     processed_frame = frame
+
+                 debug_frames.append(processed_frame)
+                 frame_indices.append(frame_idx)
+
+             frame_idx += 1
+
+     if close_tracker:
+         tracker.close()
+
+     if not landmarks_sequence:
+         raise ValueError("No frames could be processed from video")
+
+     return debug_frames, landmarks_sequence, frame_indices
+
+
+ def apply_smoothing(
+     landmarks_sequence: list,
+     params: AnalysisParameters,
+     verbose: bool,
+     timer: PerformanceTimer | None = None,
+ ) -> list:
+     """Apply smoothing to landmark sequence with auto-tuned parameters.
+
+     Args:
+         landmarks_sequence: Sequence of landmarks from all frames
+         params: Auto-tuned parameters containing smoothing settings
+         verbose: Print progress messages
+         timer: Optional PerformanceTimer for measuring operations
+
+     Returns:
+         Smoothed landmarks sequence
+     """
+     if params.outlier_rejection or params.bilateral_filter:
+         if verbose:
+             if params.outlier_rejection:
+                 print("Smoothing landmarks with outlier rejection...")
+             if params.bilateral_filter:
+                 print("Using bilateral temporal filter...")
+         if timer:
+             with timer.measure("smoothing"):
+                 return smooth_landmarks_advanced(
+                     landmarks_sequence,
+                     window_length=params.smoothing_window,
+                     polyorder=params.polyorder,
+                     use_outlier_rejection=params.outlier_rejection,
+                     use_bilateral=params.bilateral_filter,
+                 )
+         else:
+             return smooth_landmarks_advanced(
+                 landmarks_sequence,
+                 window_length=params.smoothing_window,
+                 polyorder=params.polyorder,
+                 use_outlier_rejection=params.outlier_rejection,
+                 use_bilateral=params.bilateral_filter,
+             )
+     else:
+         if verbose:
+             print("Smoothing landmarks...")
+         if timer:
+             with timer.measure("smoothing"):
+                 return smooth_landmarks(
+                     landmarks_sequence,
+                     window_length=params.smoothing_window,
+                     polyorder=params.polyorder,
+                 )
+         else:
+             return smooth_landmarks(
+                 landmarks_sequence,
+                 window_length=params.smoothing_window,
+                 polyorder=params.polyorder,
+             )
+
+
+ def calculate_foot_visibility(frame_landmarks: dict) -> float:
+     """Calculate average visibility of foot landmarks.
+
+     Args:
+         frame_landmarks: Dictionary of landmarks for a frame
+
+     Returns:
+         Average visibility value (0-1)
+     """
+     foot_keys = ["left_ankle", "right_ankle", "left_heel", "right_heel"]
+     foot_vis = [frame_landmarks[key][2] for key in foot_keys if key in frame_landmarks]
+     return float(np.mean(foot_vis)) if foot_vis else 0.0
+
+
+ def extract_vertical_positions(
+     smoothed_landmarks: list,
+     target: str = "foot",
+ ) -> tuple[np.ndarray, np.ndarray]:
+     """Extract vertical positions and visibilities from smoothed landmarks.
+
+     Args:
+         smoothed_landmarks: Smoothed landmark sequence
+         target: Tracking target "foot" or "hip" (default: "foot")
+
+     Returns:
+         Tuple of (vertical_positions, visibilities) as numpy arrays
+     """
+     position_list: list[float] = []
+     visibilities_list: list[float] = []
+
+     for frame_landmarks in smoothed_landmarks:
+         if frame_landmarks:
+             if target == "hip":
+                 _, y = compute_average_hip_position(frame_landmarks)
+                 vis = calculate_foot_visibility(frame_landmarks)
+             else:
+                 _, y = compute_average_foot_position(frame_landmarks)
+                 vis = calculate_foot_visibility(frame_landmarks)
+
+             position_list.append(y)
+             visibilities_list.append(vis)
+         else:
+             position_list.append(position_list[-1] if position_list else 0.5)
+             visibilities_list.append(0.0)
+
+     return np.array(position_list), np.array(visibilities_list)
+
+
+ def convert_timer_to_stage_names(
+     timer_metrics: dict[str, float],
+ ) -> dict[str, float]:
+     """Convert timer metric names to human-readable stage names.
+
+     Args:
+         timer_metrics: Dictionary from PerformanceTimer.get_metrics()
+
+     Returns:
+         Dictionary with human-readable stage names as keys
+     """
+     mapping = {
+         "video_initialization": "Video initialization",
+         "pose_tracking": "Pose tracking",
+         "parameter_auto_tuning": "Parameter auto-tuning",
+         "smoothing": "Smoothing",
+         "vertical_position_extraction": "Vertical position extraction",
+         "ground_contact_detection": "Ground contact detection",
+         "metrics_calculation": "Metrics calculation",
+         "quality_assessment": "Quality assessment",
+         "metadata_building": "Metadata building",
+         "metrics_validation": "Metrics validation",
+         "phase_detection": "Phase detection",
+         "json_serialization": "JSON serialization",
+         "debug_video_generation": "Debug video generation",
+         "debug_video_reencode": "Debug video re-encoding",
+         "frame_rotation": "Frame rotation",
+         "debug_video_resize": "Debug video resizing",
+         "debug_video_copy": "Debug video frame copy",
+         "debug_video_draw": "Debug video drawing",
+         "debug_video_write": "Debug video encoding",
+     }
+     return {mapping.get(k, k): v for k, v in timer_metrics.items()}
+
+
+ def process_videos_bulk_generic(
+     configs: list[TConfig],
+     processor_func: Callable[[TConfig], TResult],
+     error_factory: Callable[[str, str], TResult],
+     max_workers: int = 4,
+     progress_callback: Callable[[TResult], None] | None = None,
+ ) -> list[TResult]:
+     """
+     Generic function to process multiple videos in parallel.
+
+     Args:
+         configs: List of configuration objects
+         processor_func: Function to process a single config (must be picklable)
+         error_factory: Function that takes (video_path, error_msg) and returns a
+             result object
+         max_workers: Maximum number of parallel workers
+         progress_callback: Optional callback for progress updates
+
+     Returns:
+         List of result objects
+     """
+     results: list[TResult] = []
+
+     with ProcessPoolExecutor(max_workers=max_workers) as executor:
+         future_to_config = {
+             executor.submit(processor_func, config): config for config in configs
+         }
+
+         for future in as_completed(future_to_config):
+             config = future_to_config[future]
+             # Assume config has video_path; this is a constraint on TConfig,
+             # but we can't easily enforce it generically with a TypeVar
+             # without a Protocol. For now we assume dynamic access is okay
+             # (TConfig is duck-typed).
+             video_path = getattr(config, "video_path", "unknown")
+
+             try:
+                 result = future.result()
+             except Exception as exc:
+                 result = error_factory(video_path, f"Unexpected error: {str(exc)}")
+
+             results.append(result)
+
+             if progress_callback:
+                 progress_callback(result)
+
+     return results
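The new-file hunk above is evidently kinemotion/core/pipeline_utils.py, the module added to the RECORD list below. Note that determine_confidence_levels resolves the presets to (0.3, 0.3) for fast, (0.6, 0.6) for accurate, and (0.5, 0.5) for balanced, with explicit overrides taking precedence. To make the generic bulk runner concrete, here is a minimal usage sketch; it is not part of the package, and FakeConfig, analyze, and make_error are hypothetical stand-ins for the config class, processor function, and error factory that the real CLIs supply.

"""Usage sketch for process_videos_bulk_generic (illustrative, not from the package)."""

from dataclasses import dataclass

from kinemotion.core.pipeline_utils import process_videos_bulk_generic


@dataclass
class FakeConfig:
    """Hypothetical config; the bulk runner only requires a video_path attribute."""

    video_path: str


def analyze(config: FakeConfig) -> dict:
    # Stand-in per-video processor. It must be a module-level (picklable)
    # function, because ProcessPoolExecutor ships it to worker processes.
    return {"video": config.video_path, "ok": True}


def make_error(video_path: str, message: str) -> dict:
    # Error factory invoked when a worker raises, so one failing video
    # yields an error result instead of aborting the whole batch.
    return {"video": video_path, "ok": False, "error": message}


if __name__ == "__main__":
    configs = [FakeConfig("a.mp4"), FakeConfig("b.mp4")]
    results = process_videos_bulk_generic(
        configs,
        processor_func=analyze,
        error_factory=make_error,
        max_workers=2,
        progress_callback=lambda r: print("finished:", r["video"]),
    )
    print(results)

Because results are gathered with as_completed, the returned list is ordered by completion, not by the order of configs; a caller that needs input order would have to re-sort.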
@@ -1,7 +1,6 @@
  """Command-line interface for drop jump analysis."""

  import csv
- import glob
  import json
  import sys
  from dataclasses import dataclass
@@ -15,6 +14,10 @@ from ..api import (
      process_dropjump_video,
      process_dropjump_videos_bulk,
  )
+ from ..core.cli_utils import (
+     collect_video_files,
+     generate_batch_output_paths,
+ )


  @dataclass
@@ -176,20 +179,11 @@ def dropjump_analyze(  # NOSONAR(S107) - Click CLI requires individual
      \b
      # Batch with output directories
-     kinemotion dropjump-analyze videos/*.mp4 --batch \\
+     kinemotion dropjump-analyze videos/*.mp4 --batch \
          --json-output-dir results/ --csv-summary summary.csv
      """
      # Expand glob patterns and collect all video files
-     video_files: list[str] = []
-     for pattern in video_path:
-         expanded = glob.glob(pattern)
-         if expanded:
-             video_files.extend(expanded)
-         elif Path(pattern).exists():
-             # Direct path (not a glob pattern)
-             video_files.append(pattern)
-         else:
-             click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
+     video_files = collect_video_files(video_path)

      if not video_files:
          click.echo("Error: No video files found", err=True)
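The glob-expansion loop removed above now lives in kinemotion.core.cli_utils.collect_video_files, whose body this diff does not show. A plausible reconstruction, inferred from the removed lines rather than the actual source:

import glob
from pathlib import Path

import click


def collect_video_files(patterns: tuple[str, ...]) -> list[str]:
    """Hypothetical reconstruction of cli_utils.collect_video_files,
    inferred from the inline code removed above (not the actual source)."""
    video_files: list[str] = []
    for pattern in patterns:
        expanded = glob.glob(pattern)
        if expanded:
            # Glob pattern matched one or more files
            video_files.extend(expanded)
        elif Path(pattern).exists():
            # Direct path (not a glob pattern)
            video_files.append(pattern)
        else:
            click.echo(f"Warning: No files found for pattern: {pattern}", err=True)
    return video_files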
@@ -275,7 +269,8 @@ def _process_single(


  def _setup_batch_output_dirs(
-     output_dir: str | None, json_output_dir: str | None
+     output_dir: str | None,
+     json_output_dir: str | None,
  ) -> None:
      """Create output directories for batch processing.

@@ -313,15 +308,9 @@ def _create_video_configs(
      """
      configs: list[DropJumpVideoConfig] = []
      for video_file in video_files:
-         video_name = Path(video_file).stem
-
-         debug_video = None
-         if output_dir:
-             debug_video = str(Path(output_dir) / f"{video_name}_debug.mp4")
-
-         json_file = None
-         if json_output_dir:
-             json_file = str(Path(json_output_dir) / f"{video_name}.json")
+         debug_video, json_file = generate_batch_output_paths(
+             video_file, output_dir, json_output_dir
+         )

          config = DropJumpVideoConfig(
              video_path=video_file,
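Similarly, the per-video output-path construction removed above is now delegated to generate_batch_output_paths. A plausible reconstruction, again inferred from the removed lines rather than the actual source; the call site unpacks the pair as (debug_video, json_file), so the sketch returns them in that order:

from pathlib import Path


def generate_batch_output_paths(
    video_file: str,
    output_dir: str | None,
    json_output_dir: str | None,
) -> tuple[str | None, str | None]:
    """Hypothetical reconstruction of cli_utils.generate_batch_output_paths,
    inferred from the inline code removed above (not the actual source)."""
    video_name = Path(video_file).stem

    # <stem>_debug.mp4 under the debug-video directory, if one was given
    debug_video = None
    if output_dir:
        debug_video = str(Path(output_dir) / f"{video_name}_debug.mp4")

    # <stem>.json under the JSON output directory, if one was given
    json_file = None
    if json_output_dir:
        json_file = str(Path(json_output_dir) / f"{video_name}.json")

    return debug_video, json_file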
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: kinemotion
- Version: 0.43.0
+ Version: 0.45.0
  Summary: Video-based kinematic analysis for athletic performance
  Project-URL: Homepage, https://github.com/feniix/kinemotion
  Project-URL: Repository, https://github.com/feniix/kinemotion
@@ -1,9 +1,9 @@
  kinemotion/__init__.py,sha256=wPItmyGJUOFM6GPRVhAEvRz0-ErI7e2qiUREYJ9EfPQ,943
- kinemotion/api.py,sha256=oSCc3GNJcIBhjHyCoHbWRur8q3IzMP5y43ngwjyLwkg,52174
+ kinemotion/api.py,sha256=ltOsxG9QGFqkBW15czaQ0Rn_Ed9Nh_qKYXBTiSBopXU,33181
  kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
  kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
  kinemotion/cmj/analysis.py,sha256=qtULzp9uYzm5M0_Qu5YGJpuwjg9fz1VKAg6xg4NJxvM,21639
- kinemotion/cmj/cli.py,sha256=Mj2h9It1jVjAauvtCxfLWTRijj7zbYhxZuebhw2Zz6w,10828
+ kinemotion/cmj/cli.py,sha256=d3_lX-zstch52BxDZUQJyTBpkr2YKwkOE0gUW6nAUb0,9908
  kinemotion/cmj/debug_overlay.py,sha256=fXmWoHhqMLGo4vTtB6Ezs3yLUDOLw63zLIgU2gFlJQU,15892
  kinemotion/cmj/joint_angles.py,sha256=HmheIEiKcQz39cRezk4h-htorOhGNPsqKIR9RsAEKts,9960
  kinemotion/cmj/kinematics.py,sha256=Lq9m9MNQxnXv31VhKmXVrlM7rRkhi8PxW50N_CC8_8Y,11860
@@ -11,13 +11,14 @@ kinemotion/cmj/metrics_validator.py,sha256=V_fmlczYH06SBtwqESv-IfGi3wDsIy3RQbd7V
  kinemotion/cmj/validation_bounds.py,sha256=9ZTo68fl3ooyWjXXyTMRLpK9tFANa_rQf3oHhq7iQGE,11995
  kinemotion/core/__init__.py,sha256=GTLnE_gGIk7HC51epWUXVuNxcvS5lf7UL6qeWRlgMV0,1352
  kinemotion/core/auto_tuning.py,sha256=wtCUMOhBChVJNXfEeku3GCMW4qED6MF-O_mv2sPTiVQ,11324
- kinemotion/core/cli_utils.py,sha256=zbnifPhD-OYofJioeYfJtshuWcl8OAEWtqCGVF4ctAI,7966
+ kinemotion/core/cli_utils.py,sha256=sQPbT6XWWau-sm9yuN5c3eS5xNzoQGGXwSz6hQXtRvM,1859
  kinemotion/core/debug_overlay_utils.py,sha256=vOoWv3vlNdNgPI2R-UwAZKtSpugUUsiokR_kvaz1UWg,9025
  kinemotion/core/determinism.py,sha256=NwVrHqJiVxxFHTBPVy8aDBJH2SLIcYIpdGFp7glblB8,2515
  kinemotion/core/experimental.py,sha256=IK05AF4aZS15ke85hF3TWCqRIXU1AlD_XKzFz735Ua8,3640
  kinemotion/core/filtering.py,sha256=GsC9BB71V07LJJHgS2lsaxUAtJsupcUiwtZFDgODh8c,11417
  kinemotion/core/formatting.py,sha256=G_3eqgOtym9RFOZVEwCxye4A2cyrmgvtQ214vIshowU,2480
  kinemotion/core/metadata.py,sha256=bJAVa4nym__zx1hNowSZduMGKBSGOPxTbBQkjm6N0D0,7207
+ kinemotion/core/pipeline_utils.py,sha256=s-2AJEt_beugjbCsiNyKVSc7YBdlgc9aocR_ZSX9PfQ,14783
  kinemotion/core/pose.py,sha256=Tq4VS0YmMzrprVUsELm6FQczyLhP8UKurM9ccYn1LLU,8959
  kinemotion/core/quality.py,sha256=dPGQp08y8DdEUbUdjTThnUOUsALgF0D2sdz50cm6wLI,13098
  kinemotion/core/smoothing.py,sha256=GAfC-jxu1eqNyDjsUXqUBicKx9um5hrk49wz1FxfRNM,15219
@@ -26,14 +27,14 @@ kinemotion/core/validation.py,sha256=LmKfSl4Ayw3DgwKD9IrhsPdzp5ia4drLsHA2UuU1SCM
  kinemotion/core/video_io.py,sha256=HyLwn22fKe37j18853YYYrQi0JQWAwxpepPLNkuZKnQ,8586
  kinemotion/dropjump/__init__.py,sha256=tC3H3BrCg8Oj-db-Vrtx4PH_llR1Ppkd5jwaOjhQcLg,862
  kinemotion/dropjump/analysis.py,sha256=MjxO-vps0nz_hXlnGk7cgq3jFenJYzsM0VVpHwnHXsM,27935
- kinemotion/dropjump/cli.py,sha256=n_Wfv3AC6YIgRPYhO3F2nTSai0NR7fh95nAoWjryQeY,16250
+ kinemotion/dropjump/cli.py,sha256=eLIA0rnx60vqD__PinB1-5nQ8_xQUhCGplwsB0u9MgU,15824
  kinemotion/dropjump/debug_overlay.py,sha256=9nlnDYB_ZJO4dC1uMhDa4UOYGMBsDpyPQD3WbJjbwpM,6130
  kinemotion/dropjump/kinematics.py,sha256=kH-XM66wlOCYMpjvyb6_Qh5ZebyOfFZ47rmhgE1Tww4,19404
  kinemotion/dropjump/metrics_validator.py,sha256=CrTlGup8q2kyPXtA6HNwm7_yq0AsBaDllG7RVZdXmYA,9342
  kinemotion/dropjump/validation_bounds.py,sha256=5b4I3CKPybuvrbn-nP5yCcGF_sH4Vtyw3a5AWWvWnBk,4645
  kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- kinemotion-0.43.0.dist-info/METADATA,sha256=BTxQ2TBeVYsVzux_OTmoE-tOu8gbEBv2oxe7j-3nyDM,26020
- kinemotion-0.43.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- kinemotion-0.43.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
- kinemotion-0.43.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
- kinemotion-0.43.0.dist-info/RECORD,,
+ kinemotion-0.45.0.dist-info/METADATA,sha256=pzhaPBL4rK6K3JJvLgFSQDL4rzS0cDRY77yke4iV7b4,26020
+ kinemotion-0.45.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ kinemotion-0.45.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+ kinemotion-0.45.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+ kinemotion-0.45.0.dist-info/RECORD,,
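For anyone auditing the RECORD changes above: each entry has the form path,sha256=<digest>,size, where the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing '=' padding stripped, per the wheel spec (PEP 376 / PEP 427). A minimal sketch for recomputing an entry from an unpacked wheel:

import base64
import hashlib
from pathlib import Path


def record_digest(path: str) -> str:
    # RECORD stores the SHA-256 digest urlsafe-base64 encoded,
    # with trailing '=' padding stripped.
    digest = hashlib.sha256(Path(path).read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")


# Run from an unpacked kinemotion-0.45.0 wheel; per the RECORD above this
# should print sha256=s-2AJEt_beugjbCsiNyKVSc7YBdlgc9aocR_ZSX9PfQ
print(record_digest("kinemotion/core/pipeline_utils.py"))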