kinemotion 0.47.2__py3-none-any.whl → 0.47.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- kinemotion/api.py +35 -935
- kinemotion/cmj/__init__.py +1 -1
- kinemotion/cmj/api.py +433 -0
- kinemotion/cmj/cli.py +2 -1
- kinemotion/core/__init__.py +0 -4
- kinemotion/core/timing.py +1 -137
- kinemotion/dropjump/api.py +541 -0
- kinemotion/dropjump/cli.py +5 -5
- {kinemotion-0.47.2.dist-info → kinemotion-0.47.3.dist-info}/METADATA +1 -1
- {kinemotion-0.47.2.dist-info → kinemotion-0.47.3.dist-info}/RECORD +13 -11
- {kinemotion-0.47.2.dist-info → kinemotion-0.47.3.dist-info}/WHEEL +0 -0
- {kinemotion-0.47.2.dist-info → kinemotion-0.47.3.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.47.2.dist-info → kinemotion-0.47.3.dist-info}/licenses/LICENSE +0 -0
kinemotion/cmj/__init__.py
CHANGED
kinemotion/cmj/api.py
ADDED
@@ -0,0 +1,433 @@
+"""Public API for CMJ (Counter Movement Jump) video analysis."""
+
+import json
+import time
+from collections.abc import Callable
+from dataclasses import dataclass
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    pass
+
+from ..core.auto_tuning import (
+    analyze_video_sample,
+    auto_tune_parameters,
+)
+from ..core.filtering import reject_outliers
+from ..core.metadata import (
+    AlgorithmConfig,
+    DetectionConfig,
+    ProcessingInfo,
+    ResultMetadata,
+    SmoothingConfig,
+    VideoInfo,
+    create_timestamp,
+    get_kinemotion_version,
+)
+from ..core.pipeline_utils import (
+    apply_expert_overrides,
+    apply_smoothing,
+    convert_timer_to_stage_names,
+    determine_confidence_levels,
+    extract_vertical_positions,
+    parse_quality_preset,
+    print_verbose_parameters,
+    process_all_frames,
+    process_videos_bulk_generic,
+)
+from ..core.pose import PoseTracker
+from ..core.quality import assess_jump_quality
+from ..core.timing import PerformanceTimer, Timer
+from ..core.video_io import VideoProcessor
+from .analysis import compute_signed_velocity, detect_cmj_phases
+from .debug_overlay import CMJDebugOverlayRenderer
+from .kinematics import CMJMetrics, calculate_cmj_metrics
+from .metrics_validator import CMJMetricsValidator
+
+
+@dataclass
+class CMJVideoConfig:
+    """Configuration for processing a single CMJ video."""
+
+    video_path: str
+    quality: str = "balanced"
+    output_video: str | None = None
+    json_output: str | None = None
+    smoothing_window: int | None = None
+    velocity_threshold: float | None = None
+    min_contact_frames: int | None = None
+    visibility_threshold: float | None = None
+    detection_confidence: float | None = None
+    tracking_confidence: float | None = None
+
+
+@dataclass
+class CMJVideoResult:
+    """Result of processing a single CMJ video."""
+
+    video_path: str
+    success: bool
+    metrics: CMJMetrics | None = None
+    error: str | None = None
+    processing_time: float = 0.0
+
+
+def process_cmj_video(
+    video_path: str,
+    quality: str = "balanced",
+    output_video: str | None = None,
+    json_output: str | None = None,
+    smoothing_window: int | None = None,
+    velocity_threshold: float | None = None,
+    min_contact_frames: int | None = None,
+    visibility_threshold: float | None = None,
+    detection_confidence: float | None = None,
+    tracking_confidence: float | None = None,
+    verbose: bool = False,
+    timer: Timer | None = None,
+    pose_tracker: "PoseTracker | None" = None,
+) -> CMJMetrics:
+    """
+    Process a single CMJ video and return metrics.
+
+    CMJ (Counter Movement Jump) is performed at floor level without a drop box.
+    Athletes start standing, perform a countermovement (eccentric phase), then
+    jump upward (concentric phase).
+
+    Args:
+        video_path: Path to the input video file
+        quality: Analysis quality preset ("fast", "balanced", or "accurate")
+        output_video: Optional path for debug video output
+        json_output: Optional path for JSON metrics output
+        smoothing_window: Optional override for smoothing window
+        velocity_threshold: Optional override for velocity threshold
+        min_contact_frames: Optional override for minimum contact frames
+        visibility_threshold: Optional override for visibility threshold
+        detection_confidence: Optional override for pose detection confidence
+        tracking_confidence: Optional override for pose tracking confidence
+        verbose: Print processing details
+        timer: Optional Timer for measuring operations
+        pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)
+
+    Returns:
+        CMJMetrics object containing analysis results
+
+    Raises:
+        ValueError: If video cannot be processed or parameters are invalid
+        FileNotFoundError: If video file does not exist
+    """
+    if not Path(video_path).exists():
+        raise FileNotFoundError(f"Video file not found: {video_path}")
+
+    start_time = time.time()
+    if timer is None:
+        timer = PerformanceTimer()
+
+    quality_preset = parse_quality_preset(quality)
+
+    with timer.measure("video_initialization"):
+        with VideoProcessor(video_path, timer=timer) as video:
+            if verbose:
+                print(
+                    f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
+                    f"{video.frame_count} frames"
+                )
+
+            det_conf, track_conf = determine_confidence_levels(
+                quality_preset, detection_confidence, tracking_confidence
+            )
+
+            if verbose:
+                print("Processing all frames with MediaPipe pose tracking...")
+
+            tracker = pose_tracker
+            should_close_tracker = False
+
+            if tracker is None:
+                tracker = PoseTracker(
+                    min_detection_confidence=det_conf,
+                    min_tracking_confidence=track_conf,
+                    timer=timer,
+                )
+                should_close_tracker = True
+
+            frames, landmarks_sequence, frame_indices = process_all_frames(
+                video, tracker, verbose, timer, close_tracker=should_close_tracker
+            )
+
+            with timer.measure("parameter_auto_tuning"):
+                characteristics = analyze_video_sample(
+                    landmarks_sequence, video.fps, video.frame_count
+                )
+                params = auto_tune_parameters(characteristics, quality_preset)
+
+            params = apply_expert_overrides(
+                params,
+                smoothing_window,
+                velocity_threshold,
+                min_contact_frames,
+                visibility_threshold,
+            )
+
+            if verbose:
+                print_verbose_parameters(
+                    video, characteristics, quality_preset, params
+                )
+
+            smoothed_landmarks = apply_smoothing(
+                landmarks_sequence, params, verbose, timer
+            )
+
+            if verbose:
+                print("Extracting vertical positions (Hip and Foot)...")
+            with timer.measure("vertical_position_extraction"):
+                vertical_positions, visibilities = extract_vertical_positions(
+                    smoothed_landmarks, target="hip"
+                )
+
+                foot_positions, _ = extract_vertical_positions(
+                    smoothed_landmarks, target="foot"
+                )
+
+            tracking_method = "hip_hybrid"
+
+            if verbose:
+                print("Detecting CMJ phases...")
+            with timer.measure("phase_detection"):
+                phases = detect_cmj_phases(
+                    vertical_positions,
+                    video.fps,
+                    window_length=params.smoothing_window,
+                    polyorder=params.polyorder,
+                    landing_positions=foot_positions,
+                    timer=timer,
+                )
+
+            if phases is None:
+                raise ValueError("Could not detect CMJ phases in video")
+
+            standing_end, lowest_point, takeoff_frame, landing_frame = phases
+
+            if verbose:
+                print("Calculating metrics...")
+            with timer.measure("metrics_calculation"):
+                velocities = compute_signed_velocity(
+                    vertical_positions,
+                    window_length=params.smoothing_window,
+                    polyorder=params.polyorder,
+                )
+
+                metrics = calculate_cmj_metrics(
+                    vertical_positions,
+                    velocities,
+                    standing_end,
+                    lowest_point,
+                    takeoff_frame,
+                    landing_frame,
+                    video.fps,
+                    tracking_method=tracking_method,
+                )
+
+            if verbose:
+                print("Assessing tracking quality...")
+            with timer.measure("quality_assessment"):
+                _, outlier_mask = reject_outliers(
+                    vertical_positions,
+                    use_ransac=True,
+                    use_median=True,
+                    interpolate=False,
+                )
+
+                phases_detected = True
+                phase_count = 4
+
+                quality_result = assess_jump_quality(
+                    visibilities=visibilities,
+                    positions=vertical_positions,
+                    outlier_mask=outlier_mask,
+                    fps=video.fps,
+                    phases_detected=phases_detected,
+                    phase_count=phase_count,
+                )
+
+            algorithm_config = AlgorithmConfig(
+                detection_method="backward_search",
+                tracking_method="mediapipe_pose",
+                model_complexity=1,
+                smoothing=SmoothingConfig(
+                    window_size=params.smoothing_window,
+                    polynomial_order=params.polyorder,
+                    use_bilateral_filter=params.bilateral_filter,
+                    use_outlier_rejection=params.outlier_rejection,
+                ),
+                detection=DetectionConfig(
+                    velocity_threshold=params.velocity_threshold,
+                    min_contact_frames=params.min_contact_frames,
+                    visibility_threshold=params.visibility_threshold,
+                    use_curvature_refinement=params.use_curvature,
+                ),
+                drop_detection=None,
+            )
+
+            video_info = VideoInfo(
+                source_path=video_path,
+                fps=video.fps,
+                width=video.width,
+                height=video.height,
+                duration_s=video.frame_count / video.fps,
+                frame_count=video.frame_count,
+                codec=video.codec,
+            )
+
+            if verbose and quality_result.warnings:
+                print("\n⚠️ Quality Warnings:")
+                for warning in quality_result.warnings:
+                    print(f" - {warning}")
+                print()
+
+            if output_video:
+                if verbose:
+                    print(f"Generating debug video: {output_video}")
+
+                debug_h, debug_w = frames[0].shape[:2]
+                step = max(1, int(video.fps / 30.0))
+                debug_fps = video.fps / step
+
+                with timer.measure("debug_video_generation"):
+                    with CMJDebugOverlayRenderer(
+                        output_video,
+                        debug_w,
+                        debug_h,
+                        debug_w,
+                        debug_h,
+                        debug_fps,
+                        timer=timer,
+                    ) as renderer:
+                        for frame, idx in zip(frames, frame_indices, strict=True):
+                            annotated = renderer.render_frame(
+                                frame, smoothed_landmarks[idx], idx, metrics
+                            )
+                            renderer.write_frame(annotated)
+
+                if verbose:
+                    print(f"Debug video saved: {output_video}")
+
+            with timer.measure("metrics_validation"):
+                validator = CMJMetricsValidator()
+                validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
+                metrics.validation_result = validation_result
+
+            processing_time = time.time() - start_time
+            stage_times = convert_timer_to_stage_names(timer.get_metrics())
+
+            processing_info = ProcessingInfo(
+                version=get_kinemotion_version(),
+                timestamp=create_timestamp(),
+                quality_preset=quality_preset.value,
+                processing_time_s=processing_time,
+                timing_breakdown=stage_times,
+            )
+
+            result_metadata = ResultMetadata(
+                quality=quality_result,
+                video=video_info,
+                processing=processing_info,
+                algorithm=algorithm_config,
+            )
+
+            metrics.result_metadata = result_metadata
+
+            if json_output:
+                with timer.measure("json_serialization"):
+                    output_path = Path(json_output)
+                    metrics_dict = metrics.to_dict()
+                    json_str = json.dumps(metrics_dict, indent=2)
+                    output_path.write_text(json_str)
+
+                if verbose:
+                    print(f"Metrics written to: {json_output}")
+
+            if verbose and validation_result.issues:
+                print("\n⚠️ Validation Results:")
+                for issue in validation_result.issues:
+                    print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
+
+            if verbose:
+                total_time = time.time() - start_time
+                stage_times = convert_timer_to_stage_names(timer.get_metrics())
+
+                print("\n=== Timing Summary ===")
+                for stage, duration in stage_times.items():
+                    percentage = (duration / total_time) * 100
+                    dur_ms = duration * 1000
+                    print(f"{stage:. <40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
+                total_ms = total_time * 1000
+                print(f"{('Total'):.>40} {total_ms:>6.0f}ms (100.0%)")
+                print()
+
+                print(f"\nJump height: {metrics.jump_height:.3f}m")
+                print(f"Flight time: {metrics.flight_time * 1000:.1f}ms")
+                print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")
+
+            return metrics
+
+
+def process_cmj_videos_bulk(
+    configs: list[CMJVideoConfig],
+    max_workers: int = 4,
+    progress_callback: Callable[[CMJVideoResult], None] | None = None,
+) -> list[CMJVideoResult]:
+    """
+    Process multiple CMJ videos in parallel.
+    """
+
+    def error_factory(video_path: str, error_msg: str) -> CMJVideoResult:
+        return CMJVideoResult(video_path=video_path, success=False, error=error_msg)
+
+    return process_videos_bulk_generic(
+        configs,
+        _process_cmj_video_wrapper,
+        error_factory,
+        max_workers,
+        progress_callback,
+    )
+
+
+def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
+    """Wrapper function for parallel CMJ processing."""
+    start_time = time.time()
+
+    try:
+        metrics = process_cmj_video(
+            video_path=config.video_path,
+            quality=config.quality,
+            output_video=config.output_video,
+            json_output=config.json_output,
+            smoothing_window=config.smoothing_window,
+            velocity_threshold=config.velocity_threshold,
+            min_contact_frames=config.min_contact_frames,
+            visibility_threshold=config.visibility_threshold,
+            detection_confidence=config.detection_confidence,
+            tracking_confidence=config.tracking_confidence,
+            verbose=False,
+        )
+
+        processing_time = time.time() - start_time
+
+        return CMJVideoResult(
+            video_path=config.video_path,
+            success=True,
+            metrics=metrics,
+            processing_time=processing_time,
+        )
+
+    except Exception as e:
+        processing_time = time.time() - start_time
+
+        return CMJVideoResult(
+            video_path=config.video_path,
+            success=False,
+            error=str(e),
+            processing_time=processing_time,
+        )
kinemotion/cmj/cli.py
CHANGED
@@ -6,13 +6,14 @@ from dataclasses import dataclass
 
 import click
 
-from ..api import CMJMetrics, process_cmj_video
 from ..core.auto_tuning import QualityPreset
 from ..core.cli_utils import (
     collect_video_files,
     common_output_options,
     generate_batch_output_paths,
 )
+from .api import process_cmj_video
+from .kinematics import CMJMetrics
 
 
 @dataclass
kinemotion/core/__init__.py
CHANGED
@@ -24,9 +24,7 @@ from .smoothing import (
 )
 from .timing import (
     NULL_TIMER,
-    CompositeTimer,
     NullTimer,
-    OpenTelemetryTimer,
     PerformanceTimer,
     Timer,
 )
@@ -59,8 +57,6 @@ __all__ = [
     "Timer",
     "NullTimer",
     "NULL_TIMER",
-    "CompositeTimer",
-    "OpenTelemetryTimer",
     # Video I/O
     "VideoProcessor",
 ]
kinemotion/core/timing.py
CHANGED
@@ -26,43 +26,9 @@ Example:
 """
 
 import time
-from contextlib import AbstractContextManager
+from contextlib import AbstractContextManager
 from typing import Protocol, runtime_checkable
 
-# OpenTelemetry related imports, guarded by try-except for optional dependency
-_trace_module = None  # This will hold the actual 'trace' module if imported
-_otel_tracer_class = None  # This will hold the actual 'Tracer' class if imported
-
-try:
-    import opentelemetry.trace as _trace_module_import  # Import the module directly
-
-    _otel_tracer_class = (
-        _trace_module_import.Tracer
-    )  # Get the Tracer class from the module
-    _trace_module = (
-        _trace_module_import  # Expose the trace module globally after successful import
-    )
-except ImportError:
-    pass  # No OTel, so these remain None
-
-# Now define the global/module-level variables used elsewhere
-# Conditionally expose 'trace' and 'Tracer' aliases
-trace = _trace_module  # This will be the actual module or None
-
-
-class Tracer:  # Dummy for type hints if actual Tracer is not available
-    pass
-
-
-if _otel_tracer_class:
-    Tracer = _otel_tracer_class  # type: ignore  # Override dummy if actual Tracer is available
-
-# This _OPENTELEMETRY_AVAILABLE variable is assigned only once,
-# after the try-except block
-_OPENTELEMETRY_AVAILABLE = bool(
-    _otel_tracer_class
-)  # True if Tracer class was successfully loaded
-
 
 @runtime_checkable
 class Timer(Protocol):
@@ -279,105 +245,3 @@ class PerformanceTimer:
         A copy of the metrics dictionary to prevent external modification.
         """
         return self.metrics.copy()
-
-
-@contextmanager
-def _composite_context_manager(contexts: list[AbstractContextManager[None]]):
-    """Helper to combine multiple context managers into one.
-
-    Uses ExitStack to manage entering and exiting multiple contexts transparently.
-    """
-    with ExitStack() as stack:
-        for ctx in contexts:
-            stack.enter_context(ctx)
-        yield
-
-
-class CompositeTimer:
-    """Timer that delegates measurements to multiple underlying timers.
-
-    Useful for enabling both local performance timing (for JSON output)
-    and distributed tracing (OpenTelemetry) simultaneously.
-    """
-
-    __slots__ = ("timers",)
-
-    def __init__(self, timers: list[Timer]) -> None:
-        """Initialize composite timer.
-
-        Args:
-            timers: List of timer instances to delegate to
-        """
-        self.timers = timers
-
-    def measure(self, name: str) -> AbstractContextManager[None]:
-        """Measure using all underlying timers.
-
-        Args:
-            name: Name of the operation
-
-        Returns:
-            Context manager that manages all underlying timers
-        """
-        contexts = [timer.measure(name) for timer in self.timers]
-        return _composite_context_manager(contexts)
-
-    def get_metrics(self) -> dict[str, float]:
-        """Get combined metrics from all timers.
-
-        Returns:
-            Merged dictionary of metrics
-        """
-        metrics = {}
-        for timer in self.timers:
-            metrics.update(timer.get_metrics())
-        return metrics
-
-
-class OpenTelemetryTimer:
-    """Timer implementation that creates OpenTelemetry spans.
-
-    Maps 'measure' calls to OTel spans. Requires opentelemetry-api installed.
-    """
-
-    __slots__ = ("tracer",)
-
-    def __init__(self, tracer: Tracer | None = None) -> None:
-        """Initialize OTel timer.
-
-        Args:
-            tracer: Optional OTel tracer. If None, gets tracer for module name.
-        """
-        if not _OPENTELEMETRY_AVAILABLE:
-            self.tracer = None  # Always initialize self.tracer for __slots__
-            return
-
-        if trace is not None:
-            self.tracer = tracer or trace.get_tracer(__name__)
-        else:
-            # This branch should ideally not be reached if _OPENTELEMETRY_AVAILABLE
-            # is True but trace is None (meaning import succeeded but trace was not what
-            # expected). Defensive programming: ensure self.tracer is set.
-            self.tracer = None
-
-    def measure(self, name: str) -> AbstractContextManager[None]:
-        """Start an OpenTelemetry span.
-
-        Args:
-            name: Name of the span
-
-        Returns:
-            Span context manager (compatible with AbstractContextManager)
-        """
-        if not _OPENTELEMETRY_AVAILABLE or self.tracer is None:
-            return _NULL_CONTEXT  # Return the no-op context
-
-        return self.tracer.start_as_current_span(name)
-
-    def get_metrics(self) -> dict[str, float]:
-        """Return empty metrics (OTel handles export asynchronously).
-
-        Returns:
-            Empty dictionary
-        """
-        return {}