kinemotion 0.47.2__py3-none-any.whl → 0.47.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of kinemotion might be problematic. Click here for more details.
- kinemotion/api.py +35 -935
- kinemotion/cmj/__init__.py +1 -1
- kinemotion/cmj/api.py +433 -0
- kinemotion/cmj/cli.py +2 -1
- kinemotion/core/__init__.py +0 -4
- kinemotion/core/timing.py +1 -137
- kinemotion/dropjump/api.py +541 -0
- kinemotion/dropjump/cli.py +5 -5
- {kinemotion-0.47.2.dist-info → kinemotion-0.47.3.dist-info}/METADATA +1 -1
- {kinemotion-0.47.2.dist-info → kinemotion-0.47.3.dist-info}/RECORD +13 -11
- {kinemotion-0.47.2.dist-info → kinemotion-0.47.3.dist-info}/WHEEL +0 -0
- {kinemotion-0.47.2.dist-info → kinemotion-0.47.3.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.47.2.dist-info → kinemotion-0.47.3.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,541 @@
|
|
|
1
|
+
"""Public API for drop jump video analysis."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import time
|
|
5
|
+
from collections.abc import Callable
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import TYPE_CHECKING
|
|
9
|
+
|
|
10
|
+
if TYPE_CHECKING:
|
|
11
|
+
from numpy.typing import NDArray
|
|
12
|
+
|
|
13
|
+
from ..core.auto_tuning import (
|
|
14
|
+
AnalysisParameters,
|
|
15
|
+
QualityPreset,
|
|
16
|
+
analyze_video_sample,
|
|
17
|
+
auto_tune_parameters,
|
|
18
|
+
)
|
|
19
|
+
from ..core.filtering import reject_outliers
|
|
20
|
+
from ..core.metadata import (
|
|
21
|
+
AlgorithmConfig,
|
|
22
|
+
DetectionConfig,
|
|
23
|
+
DropDetectionConfig,
|
|
24
|
+
ProcessingInfo,
|
|
25
|
+
ResultMetadata,
|
|
26
|
+
SmoothingConfig,
|
|
27
|
+
VideoInfo,
|
|
28
|
+
create_timestamp,
|
|
29
|
+
get_kinemotion_version,
|
|
30
|
+
)
|
|
31
|
+
from ..core.pipeline_utils import (
|
|
32
|
+
apply_expert_overrides,
|
|
33
|
+
apply_smoothing,
|
|
34
|
+
convert_timer_to_stage_names,
|
|
35
|
+
determine_confidence_levels,
|
|
36
|
+
extract_vertical_positions,
|
|
37
|
+
parse_quality_preset,
|
|
38
|
+
print_verbose_parameters,
|
|
39
|
+
process_all_frames,
|
|
40
|
+
process_videos_bulk_generic,
|
|
41
|
+
)
|
|
42
|
+
from ..core.pose import PoseTracker
|
|
43
|
+
from ..core.quality import QualityAssessment, assess_jump_quality
|
|
44
|
+
from ..core.timing import NULL_TIMER, PerformanceTimer, Timer
|
|
45
|
+
from ..core.video_io import VideoProcessor
|
|
46
|
+
from .analysis import (
|
|
47
|
+
detect_ground_contact,
|
|
48
|
+
find_contact_phases,
|
|
49
|
+
)
|
|
50
|
+
from .debug_overlay import DebugOverlayRenderer
|
|
51
|
+
from .kinematics import DropJumpMetrics, calculate_drop_jump_metrics
|
|
52
|
+
from .metrics_validator import DropJumpMetricsValidator
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@dataclass
class DropJumpVideoResult:
    """Result of processing a single drop jump video.

    Returned by bulk processing; on failure `metrics` is None and `error`
    carries the stringified exception.
    """

    # Path to the video that was processed.
    video_path: str
    # True when analysis completed without raising.
    success: bool
    # Computed drop jump metrics; None when success is False.
    metrics: DropJumpMetrics | None = None
    # Error message (str(exception)) when processing failed.
    error: str | None = None
    # Wall-clock processing duration in seconds.
    processing_time: float = 0.0
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
@dataclass
class DropJumpVideoConfig:
    """Configuration for processing a single drop jump video.

    Mirrors the keyword parameters of `process_dropjump_video`; None for an
    optional field means "use the auto-tuned / preset default".
    """

    # Path to the input video file.
    video_path: str
    # Analysis quality preset ("fast", "balanced", or "accurate").
    quality: str = "balanced"
    # Optional path for the debug overlay video output.
    output_video: str | None = None
    # Optional path for JSON metrics output.
    json_output: str | None = None
    # Optional manual drop start frame (overrides auto-detection).
    drop_start_frame: int | None = None
    # Expert overrides; None lets auto-tuning choose the value.
    smoothing_window: int | None = None
    velocity_threshold: float | None = None
    min_contact_frames: int | None = None
    visibility_threshold: float | None = None
    # Pose model confidence overrides; None falls back to the preset.
    detection_confidence: float | None = None
    tracking_confidence: float | None = None
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def _assess_dropjump_quality(
    vertical_positions: "NDArray",
    visibilities: "NDArray",
    contact_states: list,
    fps: float,
) -> tuple:
    """Assess tracking quality and detect phases.

    Returns:
        Tuple of (quality_result, outlier_mask, phases_detected, phase_count)
    """
    # Phase detection first: its count feeds the quality assessment below.
    detected_phases = find_contact_phases(contact_states)
    num_phases = len(detected_phases)
    has_phases = num_phases > 0

    # Flag outlier samples in the position trace without interpolating them.
    _, mask = reject_outliers(
        vertical_positions,
        use_ransac=True,
        use_median=True,
        interpolate=False,
    )

    assessment = assess_jump_quality(
        visibilities=visibilities,
        positions=vertical_positions,
        outlier_mask=mask,
        fps=fps,
        phases_detected=has_phases,
        phase_count=num_phases,
    )

    return assessment, mask, has_phases, num_phases
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def _build_dropjump_metadata(
    video_path: str,
    video: "VideoProcessor",
    params: "AnalysisParameters",
    quality_result: QualityAssessment,
    drop_start_frame: int | None,
    metrics: DropJumpMetrics,
    processing_time: float,
    quality_preset: "QualityPreset",
    timer: Timer,
) -> ResultMetadata:
    """Build complete result metadata."""
    # A caller-supplied drop frame wins; otherwise fall back to the frame the
    # metrics calculation auto-detected (which may itself be None).
    effective_drop_frame = (
        drop_start_frame
        if drop_start_frame is not None
        else metrics.drop_start_frame
    )

    smoothing_cfg = SmoothingConfig(
        window_size=params.smoothing_window,
        polynomial_order=params.polyorder,
        use_bilateral_filter=params.bilateral_filter,
        use_outlier_rejection=params.outlier_rejection,
    )
    detection_cfg = DetectionConfig(
        velocity_threshold=params.velocity_threshold,
        min_contact_frames=params.min_contact_frames,
        visibility_threshold=params.visibility_threshold,
        use_curvature_refinement=params.use_curvature,
    )
    drop_cfg = DropDetectionConfig(
        auto_detect_drop_start=(drop_start_frame is None),
        detected_drop_frame=effective_drop_frame,
        min_stationary_duration_s=0.5,
    )
    algorithm_config = AlgorithmConfig(
        detection_method="forward_search",
        tracking_method="mediapipe_pose",
        model_complexity=1,
        smoothing=smoothing_cfg,
        detection=detection_cfg,
        drop_detection=drop_cfg,
    )

    video_info = VideoInfo(
        source_path=video_path,
        fps=video.fps,
        width=video.width,
        height=video.height,
        duration_s=video.frame_count / video.fps,
        frame_count=video.frame_count,
        codec=video.codec,
    )

    processing_info = ProcessingInfo(
        version=get_kinemotion_version(),
        timestamp=create_timestamp(),
        quality_preset=quality_preset.value,
        processing_time_s=processing_time,
        timing_breakdown=convert_timer_to_stage_names(timer.get_metrics()),
    )

    return ResultMetadata(
        quality=quality_result,
        video=video_info,
        processing=processing_info,
        algorithm=algorithm_config,
    )
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def _save_dropjump_json(
|
|
187
|
+
json_output: str,
|
|
188
|
+
metrics: DropJumpMetrics,
|
|
189
|
+
timer: Timer,
|
|
190
|
+
verbose: bool,
|
|
191
|
+
) -> None:
|
|
192
|
+
"""Save metrics to JSON file."""
|
|
193
|
+
with timer.measure("json_serialization"):
|
|
194
|
+
output_path = Path(json_output)
|
|
195
|
+
metrics_dict = metrics.to_dict()
|
|
196
|
+
json_str = json.dumps(metrics_dict, indent=2)
|
|
197
|
+
output_path.write_text(json_str)
|
|
198
|
+
|
|
199
|
+
if verbose:
|
|
200
|
+
print(f"Metrics written to: {json_output}")
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
def _print_dropjump_summary(
    start_time: float,
    timer: Timer,
) -> None:
    """Print verbose timing summary."""
    elapsed = time.time() - start_time
    stages = convert_timer_to_stage_names(timer.get_metrics())

    print("\n=== Timing Summary ===")
    for name, secs in stages.items():
        # Per-stage share of total wall-clock time, in ms and percent.
        share = (secs / elapsed) * 100
        print(f"{name:.<40} {secs * 1000:>6.0f}ms ({share:>5.1f}%)")
    print(f"{('Total'):.>40} {elapsed * 1000:>6.0f}ms (100.0%)")
    print()
    print("Analysis complete!")
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
def _generate_debug_video(
    output_video: str,
    frames: list,
    frame_indices: list[int],
    video_fps: float,
    smoothed_landmarks: list,
    contact_states: list,
    metrics: DropJumpMetrics,
    timer: Timer | None,
    verbose: bool,
) -> None:
    """Generate debug video with overlay.

    Renders each captured frame with pose landmarks, contact state and metric
    annotations, then writes the result to `output_video`. No-op when no
    frames were captured.
    """
    if verbose:
        print(f"Generating debug video: {output_video}")

    # Nothing captured -> nothing to render.
    if not frames:
        return

    timer = timer or NULL_TIMER
    # frames[0] is assumed to be an image array shaped (H, W, ...) — TODO confirm.
    debug_h, debug_w = frames[0].shape[:2]

    if video_fps > 30:
        # NOTE(review): video_fps / (video_fps / 30.0) simplifies to ~30.0;
        # presumably the intent is to cap the debug output at 30 fps — confirm.
        debug_fps = video_fps / (video_fps / 30.0)
    else:
        debug_fps = video_fps

    # Fewer frames than landmark samples means frames were subsampled during
    # capture; match the output fps to that subsampling step.
    if len(frames) < len(smoothed_landmarks):
        step = max(1, int(video_fps / 30.0))
        debug_fps = video_fps / step

    def _render_frames(renderer: DebugOverlayRenderer) -> None:
        # frame_indices maps each captured frame back to its original index,
        # used to look up the matching landmark/contact samples.
        for frame, idx in zip(frames, frame_indices, strict=True):
            annotated = renderer.render_frame(
                frame,
                smoothed_landmarks[idx],
                contact_states[idx],
                idx,
                metrics,
                use_com=False,
            )
            renderer.write_frame(annotated)

    renderer_context = DebugOverlayRenderer(
        output_video,
        debug_w,
        debug_h,
        debug_w,
        debug_h,
        debug_fps,
        timer=timer,
    )

    with timer.measure("debug_video_generation"):
        with renderer_context as renderer:
            _render_frames(renderer)

    if verbose:
        print(f"Debug video saved: {output_video}")
|
|
280
|
+
|
|
281
|
+
|
|
282
|
+
def process_dropjump_video(
    video_path: str,
    quality: str = "balanced",
    output_video: str | None = None,
    json_output: str | None = None,
    drop_start_frame: int | None = None,
    smoothing_window: int | None = None,
    velocity_threshold: float | None = None,
    min_contact_frames: int | None = None,
    visibility_threshold: float | None = None,
    detection_confidence: float | None = None,
    tracking_confidence: float | None = None,
    verbose: bool = False,
    timer: Timer | None = None,
    pose_tracker: "PoseTracker | None" = None,
) -> DropJumpMetrics:
    """
    Process a single drop jump video and return metrics.

    Jump height is calculated from flight time using kinematic formula (h = g*t²/8).

    Pipeline: pose tracking -> parameter auto-tuning (+ expert overrides) ->
    landmark smoothing -> ground-contact detection -> metrics calculation ->
    quality assessment -> optional debug video / JSON output.

    Args:
        video_path: Path to the input video file
        quality: Analysis quality preset ("fast", "balanced", or "accurate")
        output_video: Optional path for debug video output
        json_output: Optional path for JSON metrics output
        drop_start_frame: Optional manual drop start frame
        smoothing_window: Optional override for smoothing window
        velocity_threshold: Optional override for velocity threshold
        min_contact_frames: Optional override for minimum contact frames
        visibility_threshold: Optional override for visibility threshold
        detection_confidence: Optional override for pose detection confidence
        tracking_confidence: Optional override for pose tracking confidence
        verbose: Print processing details
        timer: Optional Timer for measuring operations
        pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)

    Returns:
        DropJumpMetrics object containing analysis results

    Raises:
        ValueError: If video cannot be processed or parameters are invalid
        FileNotFoundError: If video file does not exist
    """
    if not Path(video_path).exists():
        raise FileNotFoundError(f"Video file not found: {video_path}")

    # Local import — presumably to keep module import light or avoid a
    # circular dependency; confirm against ..core.determinism.
    from ..core.determinism import set_deterministic_mode

    # Fixed seed so repeated runs on the same video produce identical output.
    set_deterministic_mode(seed=42)

    start_time = time.time()
    if timer is None:
        timer = PerformanceTimer()

    quality_preset = parse_quality_preset(quality)

    with timer.measure("video_initialization"):
        with VideoProcessor(video_path, timer=timer) as video:
            # Explicit confidence overrides win over the preset's defaults.
            detection_conf, tracking_conf = determine_confidence_levels(
                quality_preset, detection_confidence, tracking_confidence
            )

            if verbose:
                print("Processing all frames with MediaPipe pose tracking...")

            tracker = pose_tracker
            should_close_tracker = False

            # Only own (and later close) the tracker when we created it here;
            # a caller-supplied tracker stays open for reuse.
            if tracker is None:
                tracker = PoseTracker(
                    min_detection_confidence=detection_conf,
                    min_tracking_confidence=tracking_conf,
                    timer=timer,
                )
                should_close_tracker = True

            frames, landmarks_sequence, frame_indices = process_all_frames(
                video, tracker, verbose, timer, close_tracker=should_close_tracker
            )

            # Auto-tune analysis parameters from a sample of the tracked video.
            with timer.measure("parameter_auto_tuning"):
                characteristics = analyze_video_sample(
                    landmarks_sequence, video.fps, video.frame_count
                )
                params = auto_tune_parameters(characteristics, quality_preset)

            # Any non-None expert override replaces the auto-tuned value.
            params = apply_expert_overrides(
                params,
                smoothing_window,
                velocity_threshold,
                min_contact_frames,
                visibility_threshold,
            )

            if verbose:
                print_verbose_parameters(
                    video, characteristics, quality_preset, params
                )

            smoothed_landmarks = apply_smoothing(
                landmarks_sequence, params, verbose, timer
            )

            if verbose:
                print("Extracting foot positions...")
            with timer.measure("vertical_position_extraction"):
                vertical_positions, visibilities = extract_vertical_positions(
                    smoothed_landmarks
                )

            if verbose:
                print("Detecting ground contact...")
            with timer.measure("ground_contact_detection"):
                contact_states = detect_ground_contact(
                    vertical_positions,
                    velocity_threshold=params.velocity_threshold,
                    min_contact_frames=params.min_contact_frames,
                    visibility_threshold=params.visibility_threshold,
                    visibilities=visibilities,
                    window_length=params.smoothing_window,
                    polyorder=params.polyorder,
                    timer=timer,
                )

            if verbose:
                print("Calculating metrics...")
            with timer.measure("metrics_calculation"):
                metrics = calculate_drop_jump_metrics(
                    contact_states,
                    vertical_positions,
                    video.fps,
                    drop_start_frame=drop_start_frame,
                    velocity_threshold=params.velocity_threshold,
                    smoothing_window=params.smoothing_window,
                    polyorder=params.polyorder,
                    use_curvature=params.use_curvature,
                    timer=timer,
                )

            if verbose:
                print("Assessing tracking quality...")
            with timer.measure("quality_assessment"):
                quality_result, _, _, _ = _assess_dropjump_quality(
                    vertical_positions, visibilities, contact_states, video.fps
                )

            if verbose and quality_result.warnings:
                print("\n⚠️ Quality Warnings:")
                for warning in quality_result.warnings:
                    print(f" - {warning}")
                print()

            if output_video:
                _generate_debug_video(
                    output_video,
                    frames,
                    frame_indices,
                    video.fps,
                    smoothed_landmarks,
                    contact_states,
                    metrics,
                    timer,
                    verbose,
                )

            # Validate computed metrics against expected physiological bounds
            # and attach the result to the metrics object.
            with timer.measure("metrics_validation"):
                validator = DropJumpMetricsValidator()
                validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
                metrics.validation_result = validation_result

            if verbose and validation_result.issues:
                print("\n⚠️ Validation Results:")
                for issue in validation_result.issues:
                    print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")

            processing_time = time.time() - start_time
            result_metadata = _build_dropjump_metadata(
                video_path,
                video,
                params,
                quality_result,
                drop_start_frame,
                metrics,
                processing_time,
                quality_preset,
                timer,
            )
            metrics.result_metadata = result_metadata

            if json_output:
                _save_dropjump_json(json_output, metrics, timer, verbose)

            if verbose:
                _print_dropjump_summary(start_time, timer)

            return metrics
|
|
479
|
+
|
|
480
|
+
|
|
481
|
+
def process_dropjump_videos_bulk(
    configs: list[DropJumpVideoConfig],
    max_workers: int = 4,
    progress_callback: Callable[[DropJumpVideoResult], None] | None = None,
) -> list[DropJumpVideoResult]:
    """
    Process multiple drop jump videos in parallel.
    """

    # Builds the failure result handed back by the generic bulk runner when a
    # video cannot be processed at all.
    def _make_error_result(video_path: str, error_msg: str) -> DropJumpVideoResult:
        return DropJumpVideoResult(
            video_path=video_path, success=False, error=error_msg
        )

    return process_videos_bulk_generic(
        configs,
        _process_dropjump_video_wrapper,
        _make_error_result,
        max_workers,
        progress_callback,
    )
|
|
502
|
+
|
|
503
|
+
|
|
504
|
+
def _process_dropjump_video_wrapper(config: DropJumpVideoConfig) -> DropJumpVideoResult:
    """Wrapper function for parallel processing."""
    started = time.time()

    try:
        computed = process_dropjump_video(
            video_path=config.video_path,
            quality=config.quality,
            output_video=config.output_video,
            json_output=config.json_output,
            drop_start_frame=config.drop_start_frame,
            smoothing_window=config.smoothing_window,
            velocity_threshold=config.velocity_threshold,
            min_contact_frames=config.min_contact_frames,
            visibility_threshold=config.visibility_threshold,
            detection_confidence=config.detection_confidence,
            tracking_confidence=config.tracking_confidence,
            verbose=False,
        )
    except Exception as exc:
        # Worker processes must never propagate; report the failure instead.
        return DropJumpVideoResult(
            video_path=config.video_path,
            success=False,
            error=str(exc),
            processing_time=time.time() - started,
        )

    return DropJumpVideoResult(
        video_path=config.video_path,
        success=True,
        metrics=computed,
        processing_time=time.time() - started,
    )
|
kinemotion/dropjump/cli.py
CHANGED
|
@@ -8,16 +8,16 @@ from pathlib import Path
|
|
|
8
8
|
|
|
9
9
|
import click
|
|
10
10
|
|
|
11
|
-
from ..
|
|
11
|
+
from ..core.cli_utils import (
|
|
12
|
+
collect_video_files,
|
|
13
|
+
generate_batch_output_paths,
|
|
14
|
+
)
|
|
15
|
+
from .api import (
|
|
12
16
|
DropJumpVideoConfig,
|
|
13
17
|
DropJumpVideoResult,
|
|
14
18
|
process_dropjump_video,
|
|
15
19
|
process_dropjump_videos_bulk,
|
|
16
20
|
)
|
|
17
|
-
from ..core.cli_utils import (
|
|
18
|
-
collect_video_files,
|
|
19
|
-
generate_batch_output_paths,
|
|
20
|
-
)
|
|
21
21
|
|
|
22
22
|
|
|
23
23
|
@dataclass
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: kinemotion
|
|
3
|
-
Version: 0.47.
|
|
3
|
+
Version: 0.47.3
|
|
4
4
|
Summary: Video-based kinematic analysis for athletic performance
|
|
5
5
|
Project-URL: Homepage, https://github.com/feniix/kinemotion
|
|
6
6
|
Project-URL: Repository, https://github.com/feniix/kinemotion
|
|
@@ -1,15 +1,16 @@
|
|
|
1
1
|
kinemotion/__init__.py,sha256=wPItmyGJUOFM6GPRVhAEvRz0-ErI7e2qiUREYJ9EfPQ,943
|
|
2
|
-
kinemotion/api.py,sha256=
|
|
2
|
+
kinemotion/api.py,sha256=uj3py8jXuG3mYnmsZQnzuCQtWrO4O6gvZzGAMfZne4o,891
|
|
3
3
|
kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
|
|
4
|
-
kinemotion/cmj/__init__.py,sha256=
|
|
4
|
+
kinemotion/cmj/__init__.py,sha256=SkAw9ka8Yd1Qfv9hcvk22m3EfucROzYrSNGNF5kDzho,113
|
|
5
5
|
kinemotion/cmj/analysis.py,sha256=3l0vYQB9tN4HtEO2MPFHVtrdzSmXgwpCm03qzYLCF0c,22196
|
|
6
|
-
kinemotion/cmj/
|
|
6
|
+
kinemotion/cmj/api.py,sha256=jFHBYgk05pZUG3FKAeMu-40DGROfRzXOeLQMrG4KUJk,15527
|
|
7
|
+
kinemotion/cmj/cli.py,sha256=S4-3YmaCjtGutDwjG475h8nIiw5utiLg5L6hCGfLOHY,9926
|
|
7
8
|
kinemotion/cmj/debug_overlay.py,sha256=fXmWoHhqMLGo4vTtB6Ezs3yLUDOLw63zLIgU2gFlJQU,15892
|
|
8
9
|
kinemotion/cmj/joint_angles.py,sha256=HmheIEiKcQz39cRezk4h-htorOhGNPsqKIR9RsAEKts,9960
|
|
9
10
|
kinemotion/cmj/kinematics.py,sha256=Q-L8M7wG-MJ6EJTq6GO17c8sD5cb0Jg6Hc5vUZr14bA,13673
|
|
10
11
|
kinemotion/cmj/metrics_validator.py,sha256=JAakR4RgNvUc7GM9Aj2TQrtatYpCCCGSzkBMXOldKjw,31455
|
|
11
12
|
kinemotion/cmj/validation_bounds.py,sha256=9ZTo68fl3ooyWjXXyTMRLpK9tFANa_rQf3oHhq7iQGE,11995
|
|
12
|
-
kinemotion/core/__init__.py,sha256=
|
|
13
|
+
kinemotion/core/__init__.py,sha256=U2fnLUGXQ0jbwpXhdksYKDXbeQndEHjn9gwTAEJ9Av0,1451
|
|
13
14
|
kinemotion/core/auto_tuning.py,sha256=wtCUMOhBChVJNXfEeku3GCMW4qED6MF-O_mv2sPTiVQ,11324
|
|
14
15
|
kinemotion/core/cli_utils.py,sha256=sQPbT6XWWau-sm9yuN5c3eS5xNzoQGGXwSz6hQXtRvM,1859
|
|
15
16
|
kinemotion/core/debug_overlay_utils.py,sha256=-goE3w4gBij99y1U4ckU5iaQPS0SupcHplT04DDWzUo,8579
|
|
@@ -22,19 +23,20 @@ kinemotion/core/pipeline_utils.py,sha256=0u7o-UFZX6cOu3NaWpFmEy5ejS0WUKggZ1HSdeZ
|
|
|
22
23
|
kinemotion/core/pose.py,sha256=z1OGuwnc-NdK6Aoc9UYCyPBzomw4eInexOWonZbsEoA,9057
|
|
23
24
|
kinemotion/core/quality.py,sha256=dPGQp08y8DdEUbUdjTThnUOUsALgF0D2sdz50cm6wLI,13098
|
|
24
25
|
kinemotion/core/smoothing.py,sha256=FZmv3rumn0mYKU2y3JPKz46EvD8TVmQ6_GsN_Vp3BdU,15650
|
|
25
|
-
kinemotion/core/timing.py,sha256=
|
|
26
|
+
kinemotion/core/timing.py,sha256=d1rjZc07Nbi5Jrio9AC-zeS0dNAlbPyNIydLz7X75Pk,7804
|
|
26
27
|
kinemotion/core/validation.py,sha256=LmKfSl4Ayw3DgwKD9IrhsPdzp5ia4drLsHA2UuU1SCM,6310
|
|
27
28
|
kinemotion/core/video_io.py,sha256=vCwpWnlW2y29l48dFXokdehQn42w_IQvayxbVTjpXqQ,7863
|
|
28
29
|
kinemotion/dropjump/__init__.py,sha256=tC3H3BrCg8Oj-db-Vrtx4PH_llR1Ppkd5jwaOjhQcLg,862
|
|
29
30
|
kinemotion/dropjump/analysis.py,sha256=p7nnCe7V6vnhQKZVYk--_nhsTvVa_WY-A3zXmyplsew,28211
|
|
30
|
-
kinemotion/dropjump/
|
|
31
|
+
kinemotion/dropjump/api.py,sha256=9tetwjoFdY7Z8PqXpNfaS96L9YVqEkJl7jejGnewhbE,17517
|
|
32
|
+
kinemotion/dropjump/cli.py,sha256=pPQkjpuPUUefGcsRuMvRTtjsxpPSqSgQ9K49rsN_X_o,15823
|
|
31
33
|
kinemotion/dropjump/debug_overlay.py,sha256=8XVuDyZ3nuNoCYkxcUWC7wyEoHyBxx77Sb--B1KiYWw,5974
|
|
32
34
|
kinemotion/dropjump/kinematics.py,sha256=PATlGaClutGKJslL-LRIXHmTsvb-xEB8PUIMScU_K4c,19849
|
|
33
35
|
kinemotion/dropjump/metrics_validator.py,sha256=CrTlGup8q2kyPXtA6HNwm7_yq0AsBaDllG7RVZdXmYA,9342
|
|
34
36
|
kinemotion/dropjump/validation_bounds.py,sha256=fyl04ZV7nfvHkL5eob6oEpV9Hxce6aiOWQ9pclLp7AQ,5077
|
|
35
37
|
kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
36
|
-
kinemotion-0.47.
|
|
37
|
-
kinemotion-0.47.
|
|
38
|
-
kinemotion-0.47.
|
|
39
|
-
kinemotion-0.47.
|
|
40
|
-
kinemotion-0.47.
|
|
38
|
+
kinemotion-0.47.3.dist-info/METADATA,sha256=LNbw4aGD3V6P3wkgH_Puury1B9D4kmwOOqKmkiCTo0Y,26020
|
|
39
|
+
kinemotion-0.47.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
40
|
+
kinemotion-0.47.3.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
|
|
41
|
+
kinemotion-0.47.3.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
|
|
42
|
+
kinemotion-0.47.3.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|