kinemotion 0.47.3-py3-none-any.whl → 0.47.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kinemotion/cmj/api.py +192 -133
- {kinemotion-0.47.3.dist-info → kinemotion-0.47.4.dist-info}/METADATA +1 -1
- {kinemotion-0.47.3.dist-info → kinemotion-0.47.4.dist-info}/RECORD +6 -6
- {kinemotion-0.47.3.dist-info → kinemotion-0.47.4.dist-info}/WHEEL +0 -0
- {kinemotion-0.47.3.dist-info → kinemotion-0.47.4.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.47.3.dist-info → kinemotion-0.47.4.dist-info}/licenses/LICENSE +0 -0
kinemotion/cmj/api.py
CHANGED
@@ -5,12 +5,13 @@ import time
 from collections.abc import Callable
 from dataclasses import dataclass
 from pathlib import Path
-from typing import TYPE_CHECKING

-
-
+import numpy as np
+from numpy.typing import NDArray

 from ..core.auto_tuning import (
+    AnalysisParameters,
+    QualityPreset,
     analyze_video_sample,
     auto_tune_parameters,
 )
@@ -37,8 +38,9 @@ from ..core.pipeline_utils import (
     process_videos_bulk_generic,
 )
 from ..core.pose import PoseTracker
-from ..core.quality import assess_jump_quality
+from ..core.quality import QualityAssessment, assess_jump_quality
 from ..core.timing import PerformanceTimer, Timer
+from ..core.validation import ValidationResult
 from ..core.video_io import VideoProcessor
 from .analysis import compute_signed_velocity, detect_cmj_phases
 from .debug_overlay import CMJDebugOverlayRenderer
@@ -46,6 +48,162 @@ from .kinematics import CMJMetrics, calculate_cmj_metrics
 from .metrics_validator import CMJMetricsValidator


+def _generate_debug_video(
+    output_video: str,
+    frames: list[NDArray[np.uint8]],
+    frame_indices: list[int],
+    smoothed_landmarks: list,
+    metrics: CMJMetrics,
+    video_fps: float,
+    timer: Timer,
+    verbose: bool,
+) -> None:
+    """Generate debug video with CMJ analysis overlay."""
+    if verbose:
+        print(f"Generating debug video: {output_video}")
+
+    debug_h, debug_w = frames[0].shape[:2]
+    step = max(1, int(video_fps / 30.0))
+    debug_fps = video_fps / step
+
+    with timer.measure("debug_video_generation"):
+        with CMJDebugOverlayRenderer(
+            output_video,
+            debug_w,
+            debug_h,
+            debug_w,
+            debug_h,
+            debug_fps,
+            timer=timer,
+        ) as renderer:
+            for frame, idx in zip(frames, frame_indices, strict=True):
+                annotated = renderer.render_frame(
+                    frame, smoothed_landmarks[idx], idx, metrics
+                )
+                renderer.write_frame(annotated)
+
+    if verbose:
+        print(f"Debug video saved: {output_video}")
+
+
+def _save_metrics_to_json(
+    metrics: CMJMetrics, json_output: str, timer: Timer, verbose: bool
+) -> None:
+    """Save metrics to JSON file."""
+    with timer.measure("json_serialization"):
+        output_path = Path(json_output)
+        metrics_dict = metrics.to_dict()
+        json_str = json.dumps(metrics_dict, indent=2)
+        output_path.write_text(json_str)
+
+    if verbose:
+        print(f"Metrics written to: {json_output}")
+
+
+def _print_timing_summary(start_time: float, timer: Timer, metrics: CMJMetrics) -> None:
+    """Print verbose timing summary and metrics."""
+    total_time = time.time() - start_time
+    stage_times = convert_timer_to_stage_names(timer.get_metrics())
+
+    print("\n=== Timing Summary ===")
+    for stage, duration in stage_times.items():
+        percentage = (duration / total_time) * 100
+        dur_ms = duration * 1000
+        print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
+    total_ms = total_time * 1000
+    print(f"{('Total'):.>40} {total_ms:>6.0f}ms (100.0%)")
+    print()
+
+    print(f"\nJump height: {metrics.jump_height:.3f}m")
+    print(f"Flight time: {metrics.flight_time * 1000:.1f}ms")
+    print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")
+
+
+def _print_quality_warnings(quality_result: QualityAssessment, verbose: bool) -> None:
+    """Print quality warnings if present."""
+    if verbose and quality_result.warnings:
+        print("\n⚠️ Quality Warnings:")
+        for warning in quality_result.warnings:
+            print(f" - {warning}")
+        print()
+
+
+def _print_validation_results(
+    validation_result: ValidationResult, verbose: bool
+) -> None:
+    """Print validation issues if present."""
+    if verbose and validation_result.issues:
+        print("\n⚠️ Validation Results:")
+        for issue in validation_result.issues:
+            print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
+
+
+def _create_algorithm_config(params: AnalysisParameters) -> AlgorithmConfig:
+    """Create algorithm configuration from parameters."""
+    return AlgorithmConfig(
+        detection_method="backward_search",
+        tracking_method="mediapipe_pose",
+        model_complexity=1,
+        smoothing=SmoothingConfig(
+            window_size=params.smoothing_window,
+            polynomial_order=params.polyorder,
+            use_bilateral_filter=params.bilateral_filter,
+            use_outlier_rejection=params.outlier_rejection,
+        ),
+        detection=DetectionConfig(
+            velocity_threshold=params.velocity_threshold,
+            min_contact_frames=params.min_contact_frames,
+            visibility_threshold=params.visibility_threshold,
+            use_curvature_refinement=params.use_curvature,
+        ),
+        drop_detection=None,
+    )
+
+
+def _create_video_info(video_path: str, video: VideoProcessor) -> VideoInfo:
+    """Create video information metadata."""
+    return VideoInfo(
+        source_path=video_path,
+        fps=video.fps,
+        width=video.width,
+        height=video.height,
+        duration_s=video.frame_count / video.fps,
+        frame_count=video.frame_count,
+        codec=video.codec,
+    )
+
+
+def _create_processing_info(
+    start_time: float, quality_preset: QualityPreset, timer: Timer
+) -> ProcessingInfo:
+    """Create processing information metadata."""
+    processing_time = time.time() - start_time
+    stage_times = convert_timer_to_stage_names(timer.get_metrics())
+
+    return ProcessingInfo(
+        version=get_kinemotion_version(),
+        timestamp=create_timestamp(),
+        quality_preset=quality_preset.value,
+        processing_time_s=processing_time,
+        timing_breakdown=stage_times,
+    )
+
+
+def _create_result_metadata(
+    quality_result: QualityAssessment,
+    video_info: VideoInfo,
+    processing_info: ProcessingInfo,
+    algorithm_config: AlgorithmConfig,
+) -> ResultMetadata:
+    """Create result metadata from components."""
+    return ResultMetadata(
+        quality=quality_result,
+        video=video_info,
+        processing=processing_info,
+        algorithm=algorithm_config,
+    )
+
+
 @dataclass
 class CMJVideoConfig:
     """Configuration for processing a single CMJ video."""
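The timing summary in _print_timing_summary lines its columns up with format-spec fill characters: stage names are left-aligned and dot-padded to 40 characters, while the total is right-aligned behind leading dots. A standalone sketch of just that formatting, using made-up values (nothing below comes from the package itself):

    # Illustrative values only; reproduces the column layout used by _print_timing_summary.
    stage, duration, total_time = "pose_tracking", 1.234, 2.0
    dur_ms = duration * 1000
    percentage = (duration / total_time) * 100
    print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")   # stage name dot-padded to 40 chars
    print(f"{'Total':.>40} {total_time * 1000:>6.0f}ms (100.0%)")   # 'Total' right-aligned behind dots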
@@ -86,7 +244,7 @@ def process_cmj_video(
     tracking_confidence: float | None = None,
     verbose: bool = False,
     timer: Timer | None = None,
-    pose_tracker:
+    pose_tracker: PoseTracker | None = None,
 ) -> CMJMetrics:
     """
     Process a single CMJ video and return metrics.
@@ -121,9 +279,7 @@ def process_cmj_video(
         raise FileNotFoundError(f"Video file not found: {video_path}")

     start_time = time.time()
-
-    timer = PerformanceTimer()
-
+    timer = timer or PerformanceTimer()
     quality_preset = parse_quality_preset(quality)

     with timer.measure("video_initialization"):
@@ -141,16 +297,12 @@
     if verbose:
         print("Processing all frames with MediaPipe pose tracking...")

-    tracker = pose_tracker
-
-
-
-
-
-        min_tracking_confidence=track_conf,
-        timer=timer,
-    )
-    should_close_tracker = True
+    tracker = pose_tracker or PoseTracker(
+        min_detection_confidence=det_conf,
+        min_tracking_confidence=track_conf,
+        timer=timer,
+    )
+    should_close_tracker = pose_tracker is None

     frames, landmarks_sequence, frame_indices = process_all_frames(
         video, tracker, verbose, timer, close_tracker=should_close_tracker
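The new pose_tracker and timer parameters are dependency-injection seams: when a caller passes its own instances the function reuses them, and per should_close_tracker = pose_tracker is None it leaves an injected tracker open for the caller to manage. A rough sketch of reusing one tracker and timer across several clips; the clip paths are placeholders, the assumption that the video path is the first positional argument and that PoseTracker exposes close() is not shown in this diff:

    # Sketch only: share one PoseTracker/PerformanceTimer across a batch of CMJ clips.
    from kinemotion.cmj.api import process_cmj_video
    from kinemotion.core.pose import PoseTracker
    from kinemotion.core.timing import PerformanceTimer

    timer = PerformanceTimer()
    tracker = PoseTracker(
        min_detection_confidence=0.5,   # same keyword names as in the diff above
        min_tracking_confidence=0.5,
        timer=timer,
    )
    try:
        for clip in ["athlete1_cmj.mp4", "athlete2_cmj.mp4"]:  # placeholder file names
            metrics = process_cmj_video(clip, pose_tracker=tracker, timer=timer)
            print(clip, metrics.jump_height)
    finally:
        tracker.close()  # assumption: the API no longer closes a tracker it did not create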
@@ -161,7 +313,6 @@
         landmarks_sequence, video.fps, video.frame_count
     )
     params = auto_tune_parameters(characteristics, quality_preset)
-
     params = apply_expert_overrides(
         params,
         smoothing_window,
@@ -185,13 +336,10 @@
     vertical_positions, visibilities = extract_vertical_positions(
         smoothed_landmarks, target="hip"
     )
-
     foot_positions, _ = extract_vertical_positions(
         smoothed_landmarks, target="foot"
     )

-    tracking_method = "hip_hybrid"
-
     if verbose:
         print("Detecting CMJ phases...")
     with timer.measure("phase_detection"):
@@ -217,7 +365,6 @@
         window_length=params.smoothing_window,
         polyorder=params.polyorder,
     )
-
     metrics = calculate_cmj_metrics(
         vertical_positions,
         velocities,
@@ -226,7 +373,7 @@
         takeoff_frame,
         landing_frame,
         video.fps,
-        tracking_method=
+        tracking_method="hip_hybrid",
     )

     if verbose:
@@ -238,137 +385,49 @@
         use_median=True,
         interpolate=False,
     )
-
-    phases_detected = True
-    phase_count = 4
-
     quality_result = assess_jump_quality(
         visibilities=visibilities,
         positions=vertical_positions,
         outlier_mask=outlier_mask,
         fps=video.fps,
-        phases_detected=
-        phase_count=
+        phases_detected=True,
+        phase_count=4,
     )

-
-        detection_method="backward_search",
-        tracking_method="mediapipe_pose",
-        model_complexity=1,
-        smoothing=SmoothingConfig(
-            window_size=params.smoothing_window,
-            polynomial_order=params.polyorder,
-            use_bilateral_filter=params.bilateral_filter,
-            use_outlier_rejection=params.outlier_rejection,
-        ),
-        detection=DetectionConfig(
-            velocity_threshold=params.velocity_threshold,
-            min_contact_frames=params.min_contact_frames,
-            visibility_threshold=params.visibility_threshold,
-            use_curvature_refinement=params.use_curvature,
-        ),
-        drop_detection=None,
-    )
-
-    video_info = VideoInfo(
-        source_path=video_path,
-        fps=video.fps,
-        width=video.width,
-        height=video.height,
-        duration_s=video.frame_count / video.fps,
-        frame_count=video.frame_count,
-        codec=video.codec,
-    )
-
-    if verbose and quality_result.warnings:
-        print("\n⚠️ Quality Warnings:")
-        for warning in quality_result.warnings:
-            print(f" - {warning}")
-        print()
+    _print_quality_warnings(quality_result, verbose)

     if output_video:
-
-
-
-
-
-
-
-
-
-
-            debug_w,
-            debug_h,
-            debug_w,
-            debug_h,
-            debug_fps,
-            timer=timer,
-        ) as renderer:
-            for frame, idx in zip(frames, frame_indices, strict=True):
-                annotated = renderer.render_frame(
-                    frame, smoothed_landmarks[idx], idx, metrics
-                )
-                renderer.write_frame(annotated)
-
-        if verbose:
-            print(f"Debug video saved: {output_video}")
+        _generate_debug_video(
+            output_video,
+            frames,
+            frame_indices,
+            smoothed_landmarks,
+            metrics,
+            video.fps,
+            timer,
+            verbose,
+        )

     with timer.measure("metrics_validation"):
         validator = CMJMetricsValidator()
         validation_result = validator.validate(metrics.to_dict()) # type: ignore[arg-type]
         metrics.validation_result = validation_result

-
-
-
-
-
-        timestamp=create_timestamp(),
-        quality_preset=quality_preset.value,
-        processing_time_s=processing_time,
-        timing_breakdown=stage_times,
+    algorithm_config = _create_algorithm_config(params)
+    video_info = _create_video_info(video_path, video)
+    processing_info = _create_processing_info(start_time, quality_preset, timer)
+    result_metadata = _create_result_metadata(
+        quality_result, video_info, processing_info, algorithm_config
     )
-
-    result_metadata = ResultMetadata(
-        quality=quality_result,
-        video=video_info,
-        processing=processing_info,
-        algorithm=algorithm_config,
-    )
-
     metrics.result_metadata = result_metadata

     if json_output:
-
-        output_path = Path(json_output)
-        metrics_dict = metrics.to_dict()
-        json_str = json.dumps(metrics_dict, indent=2)
-        output_path.write_text(json_str)
-
-        if verbose:
-            print(f"Metrics written to: {json_output}")
+        _save_metrics_to_json(metrics, json_output, timer, verbose)

-
-        print("\n⚠️ Validation Results:")
-        for issue in validation_result.issues:
-            print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
+    _print_validation_results(validation_result, verbose)

     if verbose:
-
-        stage_times = convert_timer_to_stage_names(timer.get_metrics())
-
-        print("\n=== Timing Summary ===")
-        for stage, duration in stage_times.items():
-            percentage = (duration / total_time) * 100
-            dur_ms = duration * 1000
-            print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
-        total_ms = total_time * 1000
-        print(f"{('Total'):.>40} {total_ms:>6.0f}ms (100.0%)")
-        print()
-
-        print(f"\nJump height: {metrics.jump_height:.3f}m")
-        print(f"Flight time: {metrics.flight_time * 1000:.1f}ms")
-        print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")
+        _print_timing_summary(start_time, timer, metrics)

     return metrics
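End to end, the public flow is unchanged by the refactor: one call still returns CMJMetrics, with the metrics JSON and annotated debug video now produced by _save_metrics_to_json and _generate_debug_video. A minimal sketch, assuming output_video and json_output are keyword parameters of process_cmj_video (the diff only shows those names used inside the function body) and using placeholder file names:

    # Hypothetical single-clip run exercising the refactored output paths.
    from kinemotion.cmj.api import process_cmj_video

    metrics = process_cmj_video(
        "cmj_sample.mp4",                # placeholder input video
        output_video="cmj_debug.mp4",    # rendered through _generate_debug_video
        json_output="cmj_metrics.json",  # written through _save_metrics_to_json
        verbose=True,                    # prints quality warnings, validation issues, timing summary
    )
    print(f"Jump height: {metrics.jump_height:.3f} m")
    print(f"Flight time: {metrics.flight_time * 1000:.1f} ms")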
{kinemotion-0.47.3.dist-info → kinemotion-0.47.4.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.47.3
+Version: 0.47.4
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
{kinemotion-0.47.3.dist-info → kinemotion-0.47.4.dist-info}/RECORD
CHANGED

@@ -3,7 +3,7 @@ kinemotion/api.py,sha256=uj3py8jXuG3mYnmsZQnzuCQtWrO4O6gvZzGAMfZne4o,891
 kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
 kinemotion/cmj/__init__.py,sha256=SkAw9ka8Yd1Qfv9hcvk22m3EfucROzYrSNGNF5kDzho,113
 kinemotion/cmj/analysis.py,sha256=3l0vYQB9tN4HtEO2MPFHVtrdzSmXgwpCm03qzYLCF0c,22196
-kinemotion/cmj/api.py,sha256=
+kinemotion/cmj/api.py,sha256=MLyc4pkzK8wdSI5-pZ5ekfcI9MDwySknAqOUWxWCT9s,16895
 kinemotion/cmj/cli.py,sha256=S4-3YmaCjtGutDwjG475h8nIiw5utiLg5L6hCGfLOHY,9926
 kinemotion/cmj/debug_overlay.py,sha256=fXmWoHhqMLGo4vTtB6Ezs3yLUDOLw63zLIgU2gFlJQU,15892
 kinemotion/cmj/joint_angles.py,sha256=HmheIEiKcQz39cRezk4h-htorOhGNPsqKIR9RsAEKts,9960
@@ -35,8 +35,8 @@ kinemotion/dropjump/kinematics.py,sha256=PATlGaClutGKJslL-LRIXHmTsvb-xEB8PUIMScU
 kinemotion/dropjump/metrics_validator.py,sha256=CrTlGup8q2kyPXtA6HNwm7_yq0AsBaDllG7RVZdXmYA,9342
 kinemotion/dropjump/validation_bounds.py,sha256=fyl04ZV7nfvHkL5eob6oEpV9Hxce6aiOWQ9pclLp7AQ,5077
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.47.
-kinemotion-0.47.
-kinemotion-0.47.
-kinemotion-0.47.
-kinemotion-0.47.
+kinemotion-0.47.4.dist-info/METADATA,sha256=-N2sOXvyGeykCej_Uq07hmu51025vrRF8EUFEiO5kVI,26020
+kinemotion-0.47.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+kinemotion-0.47.4.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.47.4.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.47.4.dist-info/RECORD,,
{kinemotion-0.47.3.dist-info → kinemotion-0.47.4.dist-info}/WHEEL
File without changes

{kinemotion-0.47.3.dist-info → kinemotion-0.47.4.dist-info}/entry_points.txt
File without changes

{kinemotion-0.47.3.dist-info → kinemotion-0.47.4.dist-info}/licenses/LICENSE
File without changes