kinemotion 0.47.2__py3-none-any.whl → 0.47.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
 """Counter Movement Jump (CMJ) analysis module."""

-from kinemotion.cmj.kinematics import CMJMetrics
+from .kinematics import CMJMetrics

 __all__ = ["CMJMetrics"]
kinemotion/cmj/api.py ADDED
@@ -0,0 +1,492 @@
+"""Public API for CMJ (Counter Movement Jump) video analysis."""
+
+import json
+import time
+from collections.abc import Callable
+from dataclasses import dataclass
+from pathlib import Path
+
+import numpy as np
+from numpy.typing import NDArray
+
+from ..core.auto_tuning import (
+    AnalysisParameters,
+    QualityPreset,
+    analyze_video_sample,
+    auto_tune_parameters,
+)
+from ..core.filtering import reject_outliers
+from ..core.metadata import (
+    AlgorithmConfig,
+    DetectionConfig,
+    ProcessingInfo,
+    ResultMetadata,
+    SmoothingConfig,
+    VideoInfo,
+    create_timestamp,
+    get_kinemotion_version,
+)
+from ..core.pipeline_utils import (
+    apply_expert_overrides,
+    apply_smoothing,
+    convert_timer_to_stage_names,
+    determine_confidence_levels,
+    extract_vertical_positions,
+    parse_quality_preset,
+    print_verbose_parameters,
+    process_all_frames,
+    process_videos_bulk_generic,
+)
+from ..core.pose import PoseTracker
+from ..core.quality import QualityAssessment, assess_jump_quality
+from ..core.timing import PerformanceTimer, Timer
+from ..core.validation import ValidationResult
+from ..core.video_io import VideoProcessor
+from .analysis import compute_signed_velocity, detect_cmj_phases
+from .debug_overlay import CMJDebugOverlayRenderer
+from .kinematics import CMJMetrics, calculate_cmj_metrics
+from .metrics_validator import CMJMetricsValidator
+
+
+def _generate_debug_video(
+    output_video: str,
+    frames: list[NDArray[np.uint8]],
+    frame_indices: list[int],
+    smoothed_landmarks: list,
+    metrics: CMJMetrics,
+    video_fps: float,
+    timer: Timer,
+    verbose: bool,
+) -> None:
+    """Generate debug video with CMJ analysis overlay."""
+    if verbose:
+        print(f"Generating debug video: {output_video}")
+
+    debug_h, debug_w = frames[0].shape[:2]
+    step = max(1, int(video_fps / 30.0))
+    debug_fps = video_fps / step
+
+    with timer.measure("debug_video_generation"):
+        with CMJDebugOverlayRenderer(
+            output_video,
+            debug_w,
+            debug_h,
+            debug_w,
+            debug_h,
+            debug_fps,
+            timer=timer,
+        ) as renderer:
+            for frame, idx in zip(frames, frame_indices, strict=True):
+                annotated = renderer.render_frame(
+                    frame, smoothed_landmarks[idx], idx, metrics
+                )
+                renderer.write_frame(annotated)
+
+    if verbose:
+        print(f"Debug video saved: {output_video}")
+
+
+def _save_metrics_to_json(
+    metrics: CMJMetrics, json_output: str, timer: Timer, verbose: bool
+) -> None:
+    """Save metrics to JSON file."""
+    with timer.measure("json_serialization"):
+        output_path = Path(json_output)
+        metrics_dict = metrics.to_dict()
+        json_str = json.dumps(metrics_dict, indent=2)
+        output_path.write_text(json_str)
+
+    if verbose:
+        print(f"Metrics written to: {json_output}")
+
+
+def _print_timing_summary(start_time: float, timer: Timer, metrics: CMJMetrics) -> None:
+    """Print verbose timing summary and metrics."""
+    total_time = time.time() - start_time
+    stage_times = convert_timer_to_stage_names(timer.get_metrics())
+
+    print("\n=== Timing Summary ===")
+    for stage, duration in stage_times.items():
+        percentage = (duration / total_time) * 100
+        dur_ms = duration * 1000
+        print(f"{stage:.<40} {dur_ms:>6.0f}ms ({percentage:>5.1f}%)")
+    total_ms = total_time * 1000
+    print(f"{('Total'):.>40} {total_ms:>6.0f}ms (100.0%)")
+    print()
+
+    print(f"\nJump height: {metrics.jump_height:.3f}m")
+    print(f"Flight time: {metrics.flight_time * 1000:.1f}ms")
+    print(f"Countermovement depth: {metrics.countermovement_depth:.3f}m")
+
+
+def _print_quality_warnings(quality_result: QualityAssessment, verbose: bool) -> None:
+    """Print quality warnings if present."""
+    if verbose and quality_result.warnings:
+        print("\n⚠️ Quality Warnings:")
+        for warning in quality_result.warnings:
+            print(f" - {warning}")
+        print()
+
+
+def _print_validation_results(
+    validation_result: ValidationResult, verbose: bool
+) -> None:
+    """Print validation issues if present."""
+    if verbose and validation_result.issues:
+        print("\n⚠️ Validation Results:")
+        for issue in validation_result.issues:
+            print(f" [{issue.severity.value}] {issue.metric}: {issue.message}")
+
+
+def _create_algorithm_config(params: AnalysisParameters) -> AlgorithmConfig:
+    """Create algorithm configuration from parameters."""
+    return AlgorithmConfig(
+        detection_method="backward_search",
+        tracking_method="mediapipe_pose",
+        model_complexity=1,
+        smoothing=SmoothingConfig(
+            window_size=params.smoothing_window,
+            polynomial_order=params.polyorder,
+            use_bilateral_filter=params.bilateral_filter,
+            use_outlier_rejection=params.outlier_rejection,
+        ),
+        detection=DetectionConfig(
+            velocity_threshold=params.velocity_threshold,
+            min_contact_frames=params.min_contact_frames,
+            visibility_threshold=params.visibility_threshold,
+            use_curvature_refinement=params.use_curvature,
+        ),
+        drop_detection=None,
+    )
+
+
+def _create_video_info(video_path: str, video: VideoProcessor) -> VideoInfo:
+    """Create video information metadata."""
+    return VideoInfo(
+        source_path=video_path,
+        fps=video.fps,
+        width=video.width,
+        height=video.height,
+        duration_s=video.frame_count / video.fps,
+        frame_count=video.frame_count,
+        codec=video.codec,
+    )
+
+
+def _create_processing_info(
+    start_time: float, quality_preset: QualityPreset, timer: Timer
+) -> ProcessingInfo:
+    """Create processing information metadata."""
+    processing_time = time.time() - start_time
+    stage_times = convert_timer_to_stage_names(timer.get_metrics())
+
+    return ProcessingInfo(
+        version=get_kinemotion_version(),
+        timestamp=create_timestamp(),
+        quality_preset=quality_preset.value,
+        processing_time_s=processing_time,
+        timing_breakdown=stage_times,
+    )
+
+
+def _create_result_metadata(
+    quality_result: QualityAssessment,
+    video_info: VideoInfo,
+    processing_info: ProcessingInfo,
+    algorithm_config: AlgorithmConfig,
+) -> ResultMetadata:
+    """Create result metadata from components."""
+    return ResultMetadata(
+        quality=quality_result,
+        video=video_info,
+        processing=processing_info,
+        algorithm=algorithm_config,
+    )
+
+
+@dataclass
+class CMJVideoConfig:
+    """Configuration for processing a single CMJ video."""
+
+    video_path: str
+    quality: str = "balanced"
+    output_video: str | None = None
+    json_output: str | None = None
+    smoothing_window: int | None = None
+    velocity_threshold: float | None = None
+    min_contact_frames: int | None = None
+    visibility_threshold: float | None = None
+    detection_confidence: float | None = None
+    tracking_confidence: float | None = None
+
+
+@dataclass
+class CMJVideoResult:
+    """Result of processing a single CMJ video."""
+
+    video_path: str
+    success: bool
+    metrics: CMJMetrics | None = None
+    error: str | None = None
+    processing_time: float = 0.0
+
+
+def process_cmj_video(
+    video_path: str,
+    quality: str = "balanced",
+    output_video: str | None = None,
+    json_output: str | None = None,
+    smoothing_window: int | None = None,
+    velocity_threshold: float | None = None,
+    min_contact_frames: int | None = None,
+    visibility_threshold: float | None = None,
+    detection_confidence: float | None = None,
+    tracking_confidence: float | None = None,
+    verbose: bool = False,
+    timer: Timer | None = None,
+    pose_tracker: PoseTracker | None = None,
+) -> CMJMetrics:
+    """
+    Process a single CMJ video and return metrics.
+
+    CMJ (Counter Movement Jump) is performed at floor level without a drop box.
+    Athletes start standing, perform a countermovement (eccentric phase), then
+    jump upward (concentric phase).
+
+    Args:
+        video_path: Path to the input video file
+        quality: Analysis quality preset ("fast", "balanced", or "accurate")
+        output_video: Optional path for debug video output
+        json_output: Optional path for JSON metrics output
+        smoothing_window: Optional override for smoothing window
+        velocity_threshold: Optional override for velocity threshold
+        min_contact_frames: Optional override for minimum contact frames
+        visibility_threshold: Optional override for visibility threshold
+        detection_confidence: Optional override for pose detection confidence
+        tracking_confidence: Optional override for pose tracking confidence
+        verbose: Print processing details
+        timer: Optional Timer for measuring operations
+        pose_tracker: Optional pre-initialized PoseTracker instance (reused if provided)
+
+    Returns:
+        CMJMetrics object containing analysis results
+
+    Raises:
+        ValueError: If video cannot be processed or parameters are invalid
+        FileNotFoundError: If video file does not exist
+    """
+    if not Path(video_path).exists():
+        raise FileNotFoundError(f"Video file not found: {video_path}")
+
+    start_time = time.time()
+    timer = timer or PerformanceTimer()
+    quality_preset = parse_quality_preset(quality)
+
+    with timer.measure("video_initialization"):
+        with VideoProcessor(video_path, timer=timer) as video:
+            if verbose:
+                print(
+                    f"Video: {video.width}x{video.height} @ {video.fps:.2f} fps, "
+                    f"{video.frame_count} frames"
+                )
+
+            det_conf, track_conf = determine_confidence_levels(
+                quality_preset, detection_confidence, tracking_confidence
+            )
+
+            if verbose:
+                print("Processing all frames with MediaPipe pose tracking...")
+
+            tracker = pose_tracker or PoseTracker(
+                min_detection_confidence=det_conf,
+                min_tracking_confidence=track_conf,
+                timer=timer,
+            )
+            should_close_tracker = pose_tracker is None
+
+            frames, landmarks_sequence, frame_indices = process_all_frames(
+                video, tracker, verbose, timer, close_tracker=should_close_tracker
+            )
+
+            with timer.measure("parameter_auto_tuning"):
+                characteristics = analyze_video_sample(
+                    landmarks_sequence, video.fps, video.frame_count
+                )
+                params = auto_tune_parameters(characteristics, quality_preset)
+                params = apply_expert_overrides(
+                    params,
+                    smoothing_window,
+                    velocity_threshold,
+                    min_contact_frames,
+                    visibility_threshold,
+                )
+
+            if verbose:
+                print_verbose_parameters(
+                    video, characteristics, quality_preset, params
+                )
+
+            smoothed_landmarks = apply_smoothing(
+                landmarks_sequence, params, verbose, timer
+            )
+
+            if verbose:
+                print("Extracting vertical positions (Hip and Foot)...")
+            with timer.measure("vertical_position_extraction"):
+                vertical_positions, visibilities = extract_vertical_positions(
+                    smoothed_landmarks, target="hip"
+                )
+                foot_positions, _ = extract_vertical_positions(
+                    smoothed_landmarks, target="foot"
+                )
+
+            if verbose:
+                print("Detecting CMJ phases...")
+            with timer.measure("phase_detection"):
+                phases = detect_cmj_phases(
+                    vertical_positions,
+                    video.fps,
+                    window_length=params.smoothing_window,
+                    polyorder=params.polyorder,
+                    landing_positions=foot_positions,
+                    timer=timer,
+                )
+
+            if phases is None:
+                raise ValueError("Could not detect CMJ phases in video")
+
+            standing_end, lowest_point, takeoff_frame, landing_frame = phases
+
+            if verbose:
+                print("Calculating metrics...")
+            with timer.measure("metrics_calculation"):
+                velocities = compute_signed_velocity(
+                    vertical_positions,
+                    window_length=params.smoothing_window,
+                    polyorder=params.polyorder,
+                )
+                metrics = calculate_cmj_metrics(
+                    vertical_positions,
+                    velocities,
+                    standing_end,
+                    lowest_point,
+                    takeoff_frame,
+                    landing_frame,
+                    video.fps,
+                    tracking_method="hip_hybrid",
+                )
+
+            if verbose:
+                print("Assessing tracking quality...")
+            with timer.measure("quality_assessment"):
+                _, outlier_mask = reject_outliers(
+                    vertical_positions,
+                    use_ransac=True,
+                    use_median=True,
+                    interpolate=False,
+                )
+                quality_result = assess_jump_quality(
+                    visibilities=visibilities,
+                    positions=vertical_positions,
+                    outlier_mask=outlier_mask,
+                    fps=video.fps,
+                    phases_detected=True,
+                    phase_count=4,
+                )
+
+            _print_quality_warnings(quality_result, verbose)
+
+            if output_video:
+                _generate_debug_video(
+                    output_video,
+                    frames,
+                    frame_indices,
+                    smoothed_landmarks,
+                    metrics,
+                    video.fps,
+                    timer,
+                    verbose,
+                )
+
+            with timer.measure("metrics_validation"):
+                validator = CMJMetricsValidator()
+                validation_result = validator.validate(metrics.to_dict())  # type: ignore[arg-type]
+                metrics.validation_result = validation_result
+
+            algorithm_config = _create_algorithm_config(params)
+            video_info = _create_video_info(video_path, video)
+            processing_info = _create_processing_info(start_time, quality_preset, timer)
+            result_metadata = _create_result_metadata(
+                quality_result, video_info, processing_info, algorithm_config
+            )
+            metrics.result_metadata = result_metadata
+
+            if json_output:
+                _save_metrics_to_json(metrics, json_output, timer, verbose)
+
+            _print_validation_results(validation_result, verbose)
+
+            if verbose:
+                _print_timing_summary(start_time, timer, metrics)
+
+            return metrics
+
+
+def process_cmj_videos_bulk(
+    configs: list[CMJVideoConfig],
+    max_workers: int = 4,
+    progress_callback: Callable[[CMJVideoResult], None] | None = None,
+) -> list[CMJVideoResult]:
+    """
+    Process multiple CMJ videos in parallel.
+    """
+
+    def error_factory(video_path: str, error_msg: str) -> CMJVideoResult:
+        return CMJVideoResult(video_path=video_path, success=False, error=error_msg)
+
+    return process_videos_bulk_generic(
+        configs,
+        _process_cmj_video_wrapper,
+        error_factory,
+        max_workers,
+        progress_callback,
+    )
+
+
+def _process_cmj_video_wrapper(config: CMJVideoConfig) -> CMJVideoResult:
+    """Wrapper function for parallel CMJ processing."""
+    start_time = time.time()
+
+    try:
+        metrics = process_cmj_video(
+            video_path=config.video_path,
+            quality=config.quality,
+            output_video=config.output_video,
+            json_output=config.json_output,
+            smoothing_window=config.smoothing_window,
+            velocity_threshold=config.velocity_threshold,
+            min_contact_frames=config.min_contact_frames,
+            visibility_threshold=config.visibility_threshold,
+            detection_confidence=config.detection_confidence,
+            tracking_confidence=config.tracking_confidence,
+            verbose=False,
+        )
+
+        processing_time = time.time() - start_time
+
+        return CMJVideoResult(
+            video_path=config.video_path,
+            success=True,
+            metrics=metrics,
+            processing_time=processing_time,
+        )
+
+    except Exception as e:
+        processing_time = time.time() - start_time
+
+        return CMJVideoResult(
+            video_path=config.video_path,
+            success=False,
+            error=str(e),
+            processing_time=processing_time,
+        )
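
Taken together, the new kinemotion/cmj/api.py gives the CMJ pipeline a direct Python entry point alongside the CLI: process_cmj_video for a single clip and process_cmj_videos_bulk for batches. A minimal usage sketch based only on the signatures added above; the video and JSON file names are placeholders, not files shipped with the package:

from kinemotion.cmj.api import CMJVideoConfig, process_cmj_video, process_cmj_videos_bulk

# Single clip: quality is one of "fast", "balanced", or "accurate".
metrics = process_cmj_video(
    "athlete_cmj.mp4",               # placeholder input path
    quality="balanced",
    json_output="athlete_cmj.json",  # optional JSON report
    verbose=True,
)
print(metrics.jump_height, metrics.flight_time, metrics.countermovement_depth)

# Batch: one config per video; failures come back as
# CMJVideoResult(success=False, error=...) rather than raising.
configs = [CMJVideoConfig(video_path=p) for p in ("clip_a.mp4", "clip_b.mp4")]
for result in process_cmj_videos_bulk(configs, max_workers=2):
    print(result.video_path, result.success, f"{result.processing_time:.1f}s")
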
kinemotion/cmj/cli.py CHANGED
@@ -6,13 +6,14 @@ from dataclasses import dataclass

 import click

-from ..api import CMJMetrics, process_cmj_video
 from ..core.auto_tuning import QualityPreset
 from ..core.cli_utils import (
     collect_video_files,
     common_output_options,
     generate_batch_output_paths,
 )
+from .api import process_cmj_video
+from .kinematics import CMJMetrics


 @dataclass
@@ -24,9 +24,7 @@ from .smoothing import (
 )
 from .timing import (
     NULL_TIMER,
-    CompositeTimer,
     NullTimer,
-    OpenTelemetryTimer,
     PerformanceTimer,
     Timer,
 )
@@ -59,8 +57,6 @@ __all__ = [
     "Timer",
     "NullTimer",
     "NULL_TIMER",
-    "CompositeTimer",
-    "OpenTelemetryTimer",
     # Video I/O
     "VideoProcessor",
 ]
kinemotion/core/timing.py CHANGED
@@ -26,43 +26,9 @@ Example:
 """

 import time
-from contextlib import AbstractContextManager, ExitStack, contextmanager
+from contextlib import AbstractContextManager
 from typing import Protocol, runtime_checkable

-# OpenTelemetry related imports, guarded by try-except for optional dependency
-_trace_module = None  # This will hold the actual 'trace' module if imported
-_otel_tracer_class = None  # This will hold the actual 'Tracer' class if imported
-
-try:
-    import opentelemetry.trace as _trace_module_import  # Import the module directly
-
-    _otel_tracer_class = (
-        _trace_module_import.Tracer
-    )  # Get the Tracer class from the module
-    _trace_module = (
-        _trace_module_import  # Expose the trace module globally after successful import
-    )
-except ImportError:
-    pass  # No OTel, so these remain None
-
-# Now define the global/module-level variables used elsewhere
-# Conditionally expose 'trace' and 'Tracer' aliases
-trace = _trace_module  # This will be the actual module or None
-
-
-class Tracer:  # Dummy for type hints if actual Tracer is not available
-    pass
-
-
-if _otel_tracer_class:
-    Tracer = _otel_tracer_class  # type: ignore  # Override dummy if actual Tracer is available
-
-# This _OPENTELEMETRY_AVAILABLE variable is assigned only once,
-# after the try-except block
-_OPENTELEMETRY_AVAILABLE = bool(
-    _otel_tracer_class
-)  # True if Tracer class was successfully loaded
-

 @runtime_checkable
 class Timer(Protocol):
@@ -279,105 +245,3 @@ class PerformanceTimer:
             A copy of the metrics dictionary to prevent external modification.
         """
         return self.metrics.copy()
-
-
-@contextmanager
-def _composite_context_manager(contexts: list[AbstractContextManager[None]]):
-    """Helper to combine multiple context managers into one.
-
-    Uses ExitStack to manage entering and exiting multiple contexts transparently.
-    """
-    with ExitStack() as stack:
-        for ctx in contexts:
-            stack.enter_context(ctx)
-        yield
-
-
-class CompositeTimer:
-    """Timer that delegates measurements to multiple underlying timers.
-
-    Useful for enabling both local performance timing (for JSON output)
-    and distributed tracing (OpenTelemetry) simultaneously.
-    """
-
-    __slots__ = ("timers",)
-
-    def __init__(self, timers: list[Timer]) -> None:
-        """Initialize composite timer.
-
-        Args:
-            timers: List of timer instances to delegate to
-        """
-        self.timers = timers
-
-    def measure(self, name: str) -> AbstractContextManager[None]:
-        """Measure using all underlying timers.
-
-        Args:
-            name: Name of the operation
-
-        Returns:
-            Context manager that manages all underlying timers
-        """
-        contexts = [timer.measure(name) for timer in self.timers]
-        return _composite_context_manager(contexts)
-
-    def get_metrics(self) -> dict[str, float]:
-        """Get combined metrics from all timers.
-
-        Returns:
-            Merged dictionary of metrics
-        """
-        metrics = {}
-        for timer in self.timers:
-            metrics.update(timer.get_metrics())
-        return metrics
-
-
-class OpenTelemetryTimer:
-    """Timer implementation that creates OpenTelemetry spans.
-
-    Maps 'measure' calls to OTel spans. Requires opentelemetry-api installed.
-    """
-
-    __slots__ = ("tracer",)
-
-    def __init__(self, tracer: Tracer | None = None) -> None:
-        """Initialize OTel timer.
-
-        Args:
-            tracer: Optional OTel tracer. If None, gets tracer for module name.
-        """
-        if not _OPENTELEMETRY_AVAILABLE:
-            self.tracer = None  # Always initialize self.tracer for __slots__
-            return
-
-        if trace is not None:
-            self.tracer = tracer or trace.get_tracer(__name__)
-        else:
-            # This branch should ideally not be reached if _OPENTELEMETRY_AVAILABLE
-            # is True but trace is None (meaning import succeeded but trace was not what
-            # expected). Defensive programming: ensure self.tracer is set.
-            self.tracer = None
-
-    def measure(self, name: str) -> AbstractContextManager[None]:
-        """Start an OpenTelemetry span.
-
-        Args:
-            name: Name of the span
-
-        Returns:
-            Span context manager (compatible with AbstractContextManager)
-        """
-        if not _OPENTELEMETRY_AVAILABLE or self.tracer is None:
-            return _NULL_CONTEXT  # Return the no-op context
-
-        return self.tracer.start_as_current_span(name)
-
-    def get_metrics(self) -> dict[str, float]:
-        """Return empty metrics (OTel handles export asynchronously).
-
-        Returns:
-            Empty dictionary
-        """
-        return {}
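
The removal above also drops the optional OpenTelemetry import shim, leaving Timer, NullTimer, NULL_TIMER, and PerformanceTimer as the exported timer types. Callers that still want span-based tracing can pass their own Timer-compatible object, since the Timer protocol kept in 0.47.4 only asks for measure() returning a context manager and get_metrics(). A rough sketch of such an adapter, assuming the standard opentelemetry-api package is installed; it is not part of kinemotion:

from contextlib import AbstractContextManager

from opentelemetry import trace  # opentelemetry-api, optional and only for this sketch

from kinemotion.core.timing import Timer


class SpanTimer:
    """Adapter that satisfies the Timer protocol by opening an OTel span per stage."""

    def __init__(self) -> None:
        self._tracer = trace.get_tracer(__name__)

    def measure(self, name: str) -> AbstractContextManager[None]:
        # start_as_current_span() already returns a context manager.
        return self._tracer.start_as_current_span(name)

    def get_metrics(self) -> dict[str, float]:
        # Spans are exported by the OTel SDK; nothing to report locally.
        return {}


assert isinstance(SpanTimer(), Timer)  # Timer is @runtime_checkable
# e.g. process_cmj_video("athlete_cmj.mp4", timer=SpanTimer())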