kinemotion 0.42.0__py3-none-any.whl → 0.43.0__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
Potentially problematic release: this version of kinemotion might be problematic.
- kinemotion/api.py +12 -188
- kinemotion/core/debug_overlay_utils.py +42 -9
- kinemotion/core/video_io.py +21 -9
- kinemotion/dropjump/debug_overlay.py +48 -37
- {kinemotion-0.42.0.dist-info → kinemotion-0.43.0.dist-info}/METADATA +1 -1
- {kinemotion-0.42.0.dist-info → kinemotion-0.43.0.dist-info}/RECORD +9 -9
- {kinemotion-0.42.0.dist-info → kinemotion-0.43.0.dist-info}/WHEEL +0 -0
- {kinemotion-0.42.0.dist-info → kinemotion-0.43.0.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.42.0.dist-info → kinemotion-0.43.0.dist-info}/licenses/LICENSE +0 -0
kinemotion/api.py
CHANGED
@@ -1,4 +1,4 @@
-"
+"Public API for programmatic use of kinemotion analysis."
 
 import time
 from collections.abc import Callable
@@ -37,7 +37,6 @@ from .core.smoothing import smooth_landmarks, smooth_landmarks_advanced
 from .core.timing import PerformanceTimer
 from .core.video_io import VideoProcessor
 from .dropjump.analysis import (
-    ContactState,
     compute_average_foot_position,
     detect_ground_contact,
     find_contact_phases,
@@ -370,105 +369,15 @@ def _convert_timer_to_stage_names(
         "json_serialization": "JSON serialization",
         "debug_video_generation": "Debug video generation",
         "debug_video_reencode": "Debug video re-encoding",
+        "frame_rotation": "Frame rotation",
+        "debug_video_resize": "Debug video resizing",
+        "debug_video_copy": "Debug video frame copy",
+        "debug_video_draw": "Debug video drawing",
+        "debug_video_write": "Debug video encoding",
     }
     return {mapping.get(k, k): v for k, v in timer_metrics.items()}
 
 
-def _generate_dropjump_outputs(
-    metrics: DropJumpMetrics,
-    json_output: str | None,
-    output_video: str | None,
-    frames: list,
-    smoothed_landmarks: list,
-    contact_states: list[ContactState],
-    video: VideoProcessor,
-    verbose: bool,
-    timer: PerformanceTimer | None = None,
-) -> None:
-    """Generate JSON and debug video outputs if requested.
-
-    Args:
-        metrics: Calculated drop jump metrics
-        json_output: Optional path for JSON output
-        output_video: Optional path for debug video
-        frames: List of video frames
-        smoothed_landmarks: Smoothed landmark sequence
-        contact_states: Ground contact state for each frame
-        video: Video processor with dimensions and fps
-        verbose: Print progress messages
-        timer: Optional PerformanceTimer for measuring operations
-    """
-    # Save JSON if requested
-    if json_output:
-        import json
-
-        if timer:
-            with timer.measure("json_serialization"):
-                output_path = Path(json_output)
-                metrics_dict = metrics.to_dict()
-                json_str = json.dumps(metrics_dict, indent=2)
-                output_path.write_text(json_str)
-        else:
-            output_path = Path(json_output)
-            metrics_dict = metrics.to_dict()
-            json_str = json.dumps(metrics_dict, indent=2)
-            output_path.write_text(json_str)
-
-        if verbose:
-            print(f"Metrics written to: {json_output}")
-
-    # Generate debug video if requested
-    if output_video:
-        if verbose:
-            print(f"Generating debug video: {output_video}")
-
-        if timer:
-            with timer.measure("debug_video_generation"):
-                with DebugOverlayRenderer(
-                    output_video,
-                    video.width,
-                    video.height,
-                    video.display_width,
-                    video.display_height,
-                    video.fps,
-                ) as renderer:
-                    for i, frame in enumerate(frames):
-                        annotated = renderer.render_frame(
-                            frame,
-                            smoothed_landmarks[i],
-                            contact_states[i],
-                            i,
-                            metrics,
-                            use_com=False,
-                        )
-                        renderer.write_frame(annotated)
-            # Capture re-encoding duration separately
-            with timer.measure("debug_video_reencode"):
-                pass  # Re-encoding happens in context manager __exit__
-        else:
-            with DebugOverlayRenderer(
-                output_video,
-                video.width,
-                video.height,
-                video.display_width,
-                video.display_height,
-                video.fps,
-            ) as renderer:
-                for i, frame in enumerate(frames):
-                    annotated = renderer.render_frame(
-                        frame,
-                        smoothed_landmarks[i],
-                        contact_states[i],
-                        i,
-                        metrics,
-                        use_com=False,
-                    )
-                    renderer.write_frame(annotated)
-
-        if verbose:
-            print(f"Debug video saved: {output_video}")
-
-
 @dataclass
 class DropJumpVideoResult:
     """Result of processing a single drop jump video."""
@@ -740,6 +649,7 @@ def process_dropjump_video(
                 video.display_width,
                 video.display_height,
                 video.fps,
+                timer=timer,
             ) as renderer:
                 for i, frame in enumerate(frames):
                     annotated = renderer.render_frame(
@@ -762,6 +672,7 @@ def process_dropjump_video(
                 video.display_width,
                 video.display_height,
                 video.fps,
+                timer=timer,
             ) as renderer:
                 for i, frame in enumerate(frames):
                     annotated = renderer.render_frame(
@@ -843,7 +754,7 @@ def process_dropjump_video(
             dur_ms = duration * 1000
             print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
         total_ms = total_time * 1000
-        print(f"{'Total':.>40} {total_ms:>6.0f}ms (100.0%)")
+        print(f"{('Total'):.>40} {total_ms:>6.0f}ms (100.0%)")
         print()
         print("Analysis complete!")
 
@@ -994,95 +905,6 @@ class CMJVideoResult:
     processing_time: float = 0.0
 
 
-def _generate_cmj_outputs(
-    output_video: str | None,
-    json_output: str | None,
-    metrics: CMJMetrics,
-    frames: list,
-    smoothed_landmarks: list,
-    video_width: int,
-    video_height: int,
-    video_display_width: int,
-    video_display_height: int,
-    video_fps: float,
-    verbose: bool,
-    timer: PerformanceTimer | None = None,
-) -> None:
-    """Generate JSON and debug video outputs for CMJ analysis.
-
-    Args:
-        output_video: Optional path for debug video output
-        json_output: Optional path for JSON output
-        metrics: Calculated CMJ metrics
-        frames: List of video frames
-        smoothed_landmarks: Smoothed landmark sequence
-        video_width: Video width in pixels
-        video_height: Video height in pixels
-        video_display_width: Display width considering aspect ratio
-        video_display_height: Display height considering aspect ratio
-        video_fps: Video frames per second
-        verbose: Print progress messages
-        timer: Optional PerformanceTimer for measuring operations
-    """
-    if json_output:
-        import json
-
-        if timer:
-            with timer.measure("json_serialization"):
-                output_path = Path(json_output)
-                metrics_dict = metrics.to_dict()
-                json_str = json.dumps(metrics_dict, indent=2)
-                output_path.write_text(json_str)
-        else:
-            output_path = Path(json_output)
-            metrics_dict = metrics.to_dict()
-            json_str = json.dumps(metrics_dict, indent=2)
-            output_path.write_text(json_str)
-
-        if verbose:
-            print(f"Metrics written to: {json_output}")
-
-    if output_video:
-        if verbose:
-            print(f"Generating debug video: {output_video}")
-
-        if timer:
-            with timer.measure("debug_video_generation"):
-                with CMJDebugOverlayRenderer(
-                    output_video,
-                    video_width,
-                    video_height,
-                    video_display_width,
-                    video_display_height,
-                    video_fps,
-                ) as renderer:
-                    for i, frame in enumerate(frames):
-                        annotated = renderer.render_frame(
-                            frame, smoothed_landmarks[i], i, metrics
-                        )
-                        renderer.write_frame(annotated)
-            # Capture re-encoding duration separately
-            with timer.measure("debug_video_reencode"):
-                pass  # Re-encoding happens in context manager __exit__
-        else:
-            with CMJDebugOverlayRenderer(
-                output_video,
-                video_width,
-                video_height,
-                video_display_width,
-                video_display_height,
-                video_fps,
-            ) as renderer:
-                for i, frame in enumerate(frames):
-                    annotated = renderer.render_frame(
-                        frame, smoothed_landmarks[i], i, metrics
-                    )
-                    renderer.write_frame(annotated)
-
-        if verbose:
-            print(f"Debug video saved: {output_video}")
-
-
 def process_cmj_video(
     video_path: str,
     quality: str = "balanced",
@@ -1340,6 +1162,7 @@ def process_cmj_video(
                 video.display_width,
                 video.display_height,
                 video.fps,
+                timer=timer,  # Passing timer here too
             ) as renderer:
                 for i, frame in enumerate(frames):
                     annotated = renderer.render_frame(
@@ -1357,6 +1180,7 @@ def process_cmj_video(
                 video.display_width,
                 video.display_height,
                 video.fps,
+                timer=timer,  # Passing timer here too
            ) as renderer:
                 for i, frame in enumerate(frames):
                     annotated = renderer.render_frame(
@@ -1433,7 +1257,7 @@ def process_cmj_video(
             dur_ms = duration * 1000
             print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
         total_ms = total_time * 1000
-        print(f"{'Total':.>40} {total_ms:>6.0f}ms (100.0%)")
+        print(f"{('Total'):.>40} {total_ms:>6.0f}ms (100.0%)")
         print()
 
     print(f"\nJump height: {metrics.jump_height:.3f}m")
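The api.py change removes the duplicated _generate_dropjump_outputs / _generate_cmj_outputs helpers and instead passes timer=timer through to the overlay renderers inside process_dropjump_video and process_cmj_video. The repeated "if timer: with timer.measure(...)" / "else:" branches seen throughout this diff can be collapsed with a no-op fallback context. The sketch below is illustrative only: PerformanceTimer here is a stand-in assumed to behave like the context-manager measure() calls shown in the diff (the real class lives in kinemotion/core/timing.py and is not part of this diff), and maybe_measure is a hypothetical helper, not a package function.

# Minimal sketch of the optional-timer pattern used throughout this release (assumptions noted above).
import time
from contextlib import contextmanager, nullcontext


class PerformanceTimer:
    """Stand-in that records named durations, mirroring timer.measure(name) in the diff."""

    def __init__(self) -> None:
        self.metrics: dict[str, float] = {}

    @contextmanager
    def measure(self, name: str):
        start = time.perf_counter()
        try:
            yield
        finally:
            self.metrics[name] = self.metrics.get(name, 0.0) + time.perf_counter() - start


def maybe_measure(timer: PerformanceTimer | None, name: str):
    # Falls back to a no-op context when no timer is supplied,
    # avoiding the duplicated timed/untimed branches.
    return timer.measure(name) if timer else nullcontext()

With such a helper, the timed and untimed JSON or rendering paths collapse into a single "with maybe_measure(timer, "json_serialization"):" block.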
kinemotion/core/debug_overlay_utils.py
CHANGED
@@ -9,6 +9,8 @@ from pathlib import Path
 import cv2
 import numpy as np
 
+from .timing import PerformanceTimer
+
 
 def create_video_writer(
     output_path: str,
@@ -104,6 +106,7 @@ class BaseDebugOverlayRenderer:
         display_width: int,
         display_height: int,
         fps: float,
+        timer: PerformanceTimer | None = None,
     ):
         """
         Initialize overlay renderer.
@@ -115,16 +118,30 @@ class BaseDebugOverlayRenderer:
             display_width: Display width (considering SAR)
             display_height: Display height (considering SAR)
             fps: Frames per second
+            timer: Optional PerformanceTimer for measuring operations
         """
         self.output_path = output_path
         self.width = width
         self.height = height
-        self.
-
+        self.timer = timer
+
+        # Optimize debug video resolution: Cap max dimension to 720p
+        # Reduces software encoding time on single-core Cloud Run instances.
+        # while keeping sufficient quality for visual debugging.
+        max_dimension = 720
+        if max(display_width, display_height) > max_dimension:
+            scale = max_dimension / max(display_width, display_height)
+            # Ensure dimensions are even for codec compatibility
+            self.display_width = int(display_width * scale) // 2 * 2
+            self.display_height = int(display_height * scale) // 2 * 2
+        else:
+            self.display_width = display_width
+            self.display_height = display_height
+
         # Duration of ffmpeg re-encoding (0.0 if not needed)
         self.reencode_duration_s = 0.0
         self.writer, self.needs_resize, self.used_codec = create_video_writer(
-            output_path, width, height, display_width, display_height, fps
+            output_path, width, height, self.display_width, self.display_height, fps
         )
@@ -148,13 +165,29 @@ class BaseDebugOverlayRenderer:
 
         # Resize to display dimensions if needed (to handle SAR)
         if self.needs_resize:
-
-
-
-
-
+            if self.timer:
+                with self.timer.measure("debug_video_resize"):
+                    frame = cv2.resize(
+                        frame,
+                        (self.display_width, self.display_height),
+                        interpolation=cv2.INTER_LINEAR,
+                    )
+            else:
+                frame = cv2.resize(
+                    frame,
+                    (self.display_width, self.display_height),
+                    interpolation=cv2.INTER_LINEAR,
+                )
 
-
+        if self.timer:
+            with self.timer.measure("debug_video_write"):
+                write_overlay_frame(
+                    self.writer, frame, self.display_width, self.display_height
+                )
+        else:
+            write_overlay_frame(
+                self.writer, frame, self.display_width, self.display_height
+            )
 
     def close(self) -> None:
         """Release video writer and re-encode if possible."""
kinemotion/core/video_io.py
CHANGED
@@ -189,15 +189,27 @@ class VideoProcessor:
             return None
 
         # Apply rotation if video has rotation metadata
-        if self.
-
-
-
-
-
-
-
-
+        if self.timer:
+            with self.timer.measure("frame_rotation"):
+                if self.rotation == -90 or self.rotation == 270:
+                    # -90 degrees = rotate 90 degrees clockwise
+                    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
+                elif self.rotation == 90 or self.rotation == -270:
+                    # 90 degrees = rotate 90 degrees counter-clockwise
+                    frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
+                elif self.rotation == 180 or self.rotation == -180:
+                    # 180 degrees rotation
+                    frame = cv2.rotate(frame, cv2.ROTATE_180)
+        else:
+            if self.rotation == -90 or self.rotation == 270:
+                # -90 degrees = rotate 90 degrees clockwise
+                frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
+            elif self.rotation == 90 or self.rotation == -270:
+                # 90 degrees = rotate 90 degrees counter-clockwise
+                frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
+            elif self.rotation == 180 or self.rotation == -180:
+                # 180 degrees rotation
+                frame = cv2.rotate(frame, cv2.ROTATE_180)
 
         return frame
 
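VideoProcessor now rotates each decoded frame according to the container's rotation metadata, optionally inside a "frame_rotation" timing block. The rotation branches can also be read as a lookup table; the sketch below is an equivalent rewrite of the branches shown in the diff, not the shipped code, and apply_rotation is a hypothetical helper name.

# Sketch of the rotation handling added to VideoProcessor (same mapping as the diff branches).
import cv2
import numpy as np

_ROTATE_CODES = {
    -90: cv2.ROTATE_90_CLOCKWISE,    # -90 in metadata = rotate 90 degrees clockwise
    270: cv2.ROTATE_90_CLOCKWISE,
    90: cv2.ROTATE_90_COUNTERCLOCKWISE,
    -270: cv2.ROTATE_90_COUNTERCLOCKWISE,
    180: cv2.ROTATE_180,
    -180: cv2.ROTATE_180,
}


def apply_rotation(frame: np.ndarray, rotation: int) -> np.ndarray:
    """Rotate a decoded frame according to the container's rotation metadata."""
    code = _ROTATE_CODES.get(rotation)
    return cv2.rotate(frame, code) if code is not None else frame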
kinemotion/dropjump/debug_overlay.py
CHANGED
@@ -138,42 +138,53 @@ class DebugOverlayRenderer(BaseDebugOverlayRenderer):
         Returns:
             Frame with debug overlay
         """
-        [37 removed lines not shown in this rendering]
+        if self.timer:
+            with self.timer.measure("debug_video_copy"):
+                annotated = frame.copy()
+        else:
+            annotated = frame.copy()
+
+        def _draw_overlays() -> None:
+            # Draw landmarks
+            if landmarks:
+                if use_com:
+                    self._draw_com_visualization(annotated, landmarks, contact_state)
+                else:
+                    self._draw_foot_visualization(annotated, landmarks, contact_state)
+
+            # Draw contact state
+            state_color = (
+                (0, 255, 0) if contact_state == ContactState.ON_GROUND else (0, 0, 255)
+            )
+            cv2.putText(
+                annotated,
+                f"State: {contact_state.value}",
+                (10, 30),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                1,
+                state_color,
+                2,
+            )
+
+            # Draw frame number
+            cv2.putText(
+                annotated,
+                f"Frame: {frame_idx}",
+                (10, 70),
+                cv2.FONT_HERSHEY_SIMPLEX,
+                0.7,
+                (255, 255, 255),
+                2,
+            )
+
+            # Draw phase labels
+            if metrics:
+                self._draw_phase_labels(annotated, frame_idx, metrics)
+
+        if self.timer:
+            with self.timer.measure("debug_video_draw"):
+                _draw_overlays()
+        else:
+            _draw_overlays()
 
         return annotated
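DebugOverlayRenderer.render_frame now wraps the frame copy and the overlay drawing in "debug_video_copy" / "debug_video_draw" measurements when a timer is present. A caller-side sketch of how the timed path fits together in 0.43.0, mirroring the render loop shown in the api.py hunks; the import paths are assumed from the file names in this diff, and write_debug_video is a hypothetical wrapper, not a package function.

# Hypothetical caller-side sketch of the timed debug-video path (assumptions noted above).
from kinemotion.core.timing import PerformanceTimer
from kinemotion.core.video_io import VideoProcessor
from kinemotion.dropjump.debug_overlay import DebugOverlayRenderer


def write_debug_video(
    output_video: str,
    frames: list,
    smoothed_landmarks: list,
    contact_states: list,
    metrics,
    video: VideoProcessor,
    timer: PerformanceTimer | None = None,
) -> None:
    # timer=timer is the new 0.43.0 argument that enables per-stage
    # measurement (copy, draw, resize, write) inside the renderer.
    with DebugOverlayRenderer(
        output_video,
        video.width,
        video.height,
        video.display_width,
        video.display_height,
        video.fps,
        timer=timer,
    ) as renderer:
        for i, frame in enumerate(frames):
            annotated = renderer.render_frame(
                frame,
                smoothed_landmarks[i],
                contact_states[i],
                i,
                metrics,
                use_com=False,
            )
            renderer.write_frame(annotated)

The resulting stage durations are what _convert_timer_to_stage_names maps to the human-readable labels ("Debug video frame copy", "Debug video drawing", and so on) in the verbose timing report.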
{kinemotion-0.42.0.dist-info → kinemotion-0.43.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: kinemotion
-Version: 0.42.0
+Version: 0.43.0
 Summary: Video-based kinematic analysis for athletic performance
 Project-URL: Homepage, https://github.com/feniix/kinemotion
 Project-URL: Repository, https://github.com/feniix/kinemotion
{kinemotion-0.42.0.dist-info → kinemotion-0.43.0.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
 kinemotion/__init__.py,sha256=wPItmyGJUOFM6GPRVhAEvRz0-ErI7e2qiUREYJ9EfPQ,943
-kinemotion/api.py,sha256=
+kinemotion/api.py,sha256=oSCc3GNJcIBhjHyCoHbWRur8q3IzMP5y43ngwjyLwkg,52174
 kinemotion/cli.py,sha256=cqYV_7URH0JUDy1VQ_EDLv63FmNO4Ns20m6s1XAjiP4,464
 kinemotion/cmj/__init__.py,sha256=Ynv0-Oco4I3Y1Ubj25m3h9h2XFqeNwpAewXmAYOmwfU,127
 kinemotion/cmj/analysis.py,sha256=qtULzp9uYzm5M0_Qu5YGJpuwjg9fz1VKAg6xg4NJxvM,21639
@@ -12,7 +12,7 @@ kinemotion/cmj/validation_bounds.py,sha256=9ZTo68fl3ooyWjXXyTMRLpK9tFANa_rQf3oHh
 kinemotion/core/__init__.py,sha256=GTLnE_gGIk7HC51epWUXVuNxcvS5lf7UL6qeWRlgMV0,1352
 kinemotion/core/auto_tuning.py,sha256=wtCUMOhBChVJNXfEeku3GCMW4qED6MF-O_mv2sPTiVQ,11324
 kinemotion/core/cli_utils.py,sha256=zbnifPhD-OYofJioeYfJtshuWcl8OAEWtqCGVF4ctAI,7966
-kinemotion/core/debug_overlay_utils.py,sha256=
+kinemotion/core/debug_overlay_utils.py,sha256=vOoWv3vlNdNgPI2R-UwAZKtSpugUUsiokR_kvaz1UWg,9025
 kinemotion/core/determinism.py,sha256=NwVrHqJiVxxFHTBPVy8aDBJH2SLIcYIpdGFp7glblB8,2515
 kinemotion/core/experimental.py,sha256=IK05AF4aZS15ke85hF3TWCqRIXU1AlD_XKzFz735Ua8,3640
 kinemotion/core/filtering.py,sha256=GsC9BB71V07LJJHgS2lsaxUAtJsupcUiwtZFDgODh8c,11417
@@ -23,17 +23,17 @@ kinemotion/core/quality.py,sha256=dPGQp08y8DdEUbUdjTThnUOUsALgF0D2sdz50cm6wLI,13
 kinemotion/core/smoothing.py,sha256=GAfC-jxu1eqNyDjsUXqUBicKx9um5hrk49wz1FxfRNM,15219
 kinemotion/core/timing.py,sha256=bdRg1g7J0-eWB3oj7tEF5Ucp_tiad1IxsM14edAZQu4,1484
 kinemotion/core/validation.py,sha256=LmKfSl4Ayw3DgwKD9IrhsPdzp5ia4drLsHA2UuU1SCM,6310
-kinemotion/core/video_io.py,sha256=
+kinemotion/core/video_io.py,sha256=HyLwn22fKe37j18853YYYrQi0JQWAwxpepPLNkuZKnQ,8586
 kinemotion/dropjump/__init__.py,sha256=tC3H3BrCg8Oj-db-Vrtx4PH_llR1Ppkd5jwaOjhQcLg,862
 kinemotion/dropjump/analysis.py,sha256=MjxO-vps0nz_hXlnGk7cgq3jFenJYzsM0VVpHwnHXsM,27935
 kinemotion/dropjump/cli.py,sha256=n_Wfv3AC6YIgRPYhO3F2nTSai0NR7fh95nAoWjryQeY,16250
-kinemotion/dropjump/debug_overlay.py,sha256=
+kinemotion/dropjump/debug_overlay.py,sha256=9nlnDYB_ZJO4dC1uMhDa4UOYGMBsDpyPQD3WbJjbwpM,6130
 kinemotion/dropjump/kinematics.py,sha256=kH-XM66wlOCYMpjvyb6_Qh5ZebyOfFZ47rmhgE1Tww4,19404
 kinemotion/dropjump/metrics_validator.py,sha256=CrTlGup8q2kyPXtA6HNwm7_yq0AsBaDllG7RVZdXmYA,9342
 kinemotion/dropjump/validation_bounds.py,sha256=5b4I3CKPybuvrbn-nP5yCcGF_sH4Vtyw3a5AWWvWnBk,4645
 kinemotion/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kinemotion-0.
-kinemotion-0.
-kinemotion-0.
-kinemotion-0.
-kinemotion-0.
+kinemotion-0.43.0.dist-info/METADATA,sha256=BTxQ2TBeVYsVzux_OTmoE-tOu8gbEBv2oxe7j-3nyDM,26020
+kinemotion-0.43.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+kinemotion-0.43.0.dist-info/entry_points.txt,sha256=zaqnAnjLvcdrk1Qvj5nvXZCZ2gp0prS7it1zTJygcIY,50
+kinemotion-0.43.0.dist-info/licenses/LICENSE,sha256=KZajvqsHw0NoOHOi2q0FZ4NBe9HdV6oey-IPYAtHXfg,1088
+kinemotion-0.43.0.dist-info/RECORD,,
{kinemotion-0.42.0.dist-info → kinemotion-0.43.0.dist-info}/WHEEL
File without changes
{kinemotion-0.42.0.dist-info → kinemotion-0.43.0.dist-info}/entry_points.txt
File without changes
{kinemotion-0.42.0.dist-info → kinemotion-0.43.0.dist-info}/licenses/LICENSE
File without changes