kinemotion 0.67.0-py3-none-any.whl → 0.68.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kinemotion might be problematic.
- kinemotion/cli.py +4 -3
- kinemotion/cmj/api.py +45 -17
- kinemotion/cmj/cli.py +21 -0
- kinemotion/core/__init__.py +9 -2
- kinemotion/core/debug_overlay_utils.py +2 -2
- kinemotion/core/pipeline_utils.py +3 -3
- kinemotion/core/pose.py +462 -1
- kinemotion/core/rtmpose_cpu.py +626 -0
- kinemotion/core/rtmpose_wrapper.py +190 -0
- kinemotion/core/timing.py +4 -2
- kinemotion/dropjump/api.py +40 -16
- kinemotion/dropjump/cli.py +27 -1
- kinemotion/models/rtmpose-s_simcc-body7_pt-body7-halpe26_700e-256x192-7f134165_20230605.onnx +0 -0
- kinemotion/models/yolox_tiny_8xb8-300e_humanart-6f3252f9.onnx +0 -0
- {kinemotion-0.67.0.dist-info → kinemotion-0.68.0.dist-info}/METADATA +5 -1
- {kinemotion-0.67.0.dist-info → kinemotion-0.68.0.dist-info}/RECORD +19 -15
- {kinemotion-0.67.0.dist-info → kinemotion-0.68.0.dist-info}/WHEEL +0 -0
- {kinemotion-0.67.0.dist-info → kinemotion-0.68.0.dist-info}/entry_points.txt +0 -0
- {kinemotion-0.67.0.dist-info → kinemotion-0.68.0.dist-info}/licenses/LICENSE +0 -0
kinemotion/core/pose.py
CHANGED
@@ -52,7 +52,7 @@ _STRATEGY_CONFIGS: dict[str, dict[str, float | str]] = {
 }


-class PoseTracker:
+class MediaPipePoseTracker:
     """Tracks human pose landmarks in video frames using MediaPipe Tasks API.

     Args:
@@ -158,6 +158,467 @@ class PoseTracker:
         pass


+class PoseTrackerFactory:
+    """Factory for creating pose trackers with automatic backend selection.
+
+    Supports multiple backends with auto-detection:
+    - RTMPose CUDA: NVIDIA GPU acceleration (fastest, 133 FPS)
+    - RTMPose CoreML: Apple Silicon acceleration (42 FPS)
+    - RTMPose CPU: Optimized CPU implementation (40-68 FPS)
+    - MediaPipe: Fallback baseline (48 FPS)
+
+    Usage:
+        # Auto-detect best backend
+        tracker = PoseTrackerFactory.create()
+
+        # Force specific backend
+        tracker = PoseTrackerFactory.create(backend='rtmpose-cuda')
+
+        # Check available backends
+        available = PoseTrackerFactory.get_available_backends()
+    """
+
+    # Backend class mappings
+    _BACKENDS: dict[str, type] = {}
+
+    @classmethod
+    def create(
+        cls,
+        backend: str = "auto",
+        mode: str = "lightweight",
+        **kwargs: object,
+    ):
+        """Create a pose tracker with the specified backend.
+
+        Args:
+            backend: Backend selection:
+                - 'auto': Auto-detect best available backend
+                - 'mediapipe': MediaPipe Tasks API (baseline)
+                - 'rtmpose-cpu': RTMPose optimized CPU
+                - 'rtmpose-cuda': RTMPose with CUDA (NVIDIA GPU)
+                - 'rtmpose-coreml': RTMPose with CoreML (Apple Silicon)
+            mode: RTMPose performance mode ('lightweight', 'balanced', 'performance')
+                Only used for RTMPose backends
+            **kwargs: Additional arguments passed to tracker constructor
+
+        Returns:
+            Configured pose tracker instance
+
+        Raises:
+            ValueError: If backend is not available or recognized
+        """
+        # Auto-detect backend
+        if backend == "auto":
+            backend = cls._detect_best_backend()
+            backend = cls._check_backend_available(backend)
+
+        # Check environment variable override
+        import os
+
+        env_backend = os.environ.get("POSE_TRACKER_BACKEND")
+        if env_backend:
+            backend = cls._normalize_backend_name(env_backend)
+
+        # Verify backend is available
+        backend = cls._check_backend_available(backend)
+
+        # Get tracker class
+        tracker_class = cls._get_tracker_class(backend)
+
+        # Create tracker with appropriate arguments
+        return cls._create_tracker(tracker_class, backend, mode, kwargs)
+
+    @classmethod
+    def _detect_best_backend(cls) -> str:
+        """Detect the best available backend.
+
+        Priority order:
+        1. CUDA (NVIDIA GPU) - fastest
+        2. CoreML (Apple Silicon) - good performance
+        3. RTMPose CPU - optimized CPU
+        4. MediaPipe - baseline fallback
+
+        Returns:
+            Backend name string
+        """
+        # Check for CUDA (NVIDIA GPU)
+        try:
+            import torch
+
+            if torch.cuda.is_available():
+                return "rtmpose-cuda"
+        except ImportError:
+            pass
+
+        # Check for CoreML (Apple Silicon)
+        import sys
+
+        if sys.platform == "darwin":
+            return "rtmpose-coreml"
+
+        # Check for RTMPose CPU
+        try:
+            from kinemotion.core.rtmpose_cpu import (
+                OptimizedCPUTracker as _RTMPoseCPU,  # type: ignore
+            )
+
+            _ = _RTMPoseCPU  # Mark as intentionally used for availability check
+
+            return "rtmpose-cpu"
+        except ImportError:
+            pass
+
+        # Fallback to MediaPipe
+        return "mediapipe"
+
+    @classmethod
+    def _check_backend_available(cls, backend: str) -> str:
+        """Check if a backend is available and return a fallback if not.
+
+        Args:
+            backend: Requested backend name
+
+        Returns:
+            Available backend name (may be different from requested)
+
+        Raises:
+            ValueError: If no backend is available
+        """
+        normalized = cls._normalize_backend_name(backend)
+
+        # Check if specific backend can be imported
+        if normalized == "rtmpose-cuda":
+            try:
+                import torch  # noqa: F401
+
+                if not torch.cuda.is_available():
+                    # CUDA not available, fall back to CPU
+                    return cls._check_backend_available("rtmpose-cpu")
+                # CUDA is available, use rtmpose-cuda
+                return normalized
+            except ImportError:
+                return cls._check_backend_available("rtmpose-cpu")
+
+        if normalized == "rtmpose-coreml":
+            import sys
+
+            if sys.platform != "darwin":
+                # Not macOS, fall back to CPU
+                return cls._check_backend_available("rtmpose-cpu")
+
+        if normalized == "rtmpose-cpu":
+            try:
+                from kinemotion.core.rtmpose_cpu import (
+                    OptimizedCPUTracker as _RTMPoseCPU,
+                )  # type: ignore
+
+                _ = _RTMPoseCPU  # Mark as intentionally used for availability check
+
+                return normalized
+            except ImportError:
+                # RTMPose not available, fall back to MediaPipe
+                return "mediapipe"
+
+        if normalized == "mediapipe":
+            try:
+                import mediapipe as _mp  # noqa: F401
+
+                _ = _mp  # Mark as intentionally used for availability check
+                return normalized
+            except ImportError as err:
+                raise ValueError(
+                    "No pose tracking backend available. Please install mediapipe or rtmlib."
+                ) from err
+
+        raise ValueError(f"Unknown backend: {backend}")
+
+    @classmethod
+    def _normalize_backend_name(cls, backend: str) -> str:
+        """Normalize backend name to canonical form.
+
+        Args:
+            backend: User-provided backend name
+
+        Returns:
+            Canonical backend name
+        """
+        # Normalize various aliases to canonical names
+        aliases = {
+            "mp": "mediapipe",
+            "mediapipe": "mediapipe",
+            "rtmpose": "rtmpose-cpu",
+            "rtmpose-cpu": "rtmpose-cpu",
+            "rtmpose_cpu": "rtmpose-cpu",
+            "cpu": "rtmpose-cpu",
+            "cuda": "rtmpose-cuda",
+            "rtmpose-cuda": "rtmpose-cuda",
+            "rtmpose_cuda": "rtmpose-cuda",
+            "gpu": "rtmpose-cuda",
+            "mps": "rtmpose-coreml",
+            "coreml": "rtmpose-coreml",
+            "rtmpose-coreml": "rtmpose-coreml",
+            "rtmpose_coreml": "rtmpose-coreml",
+        }
+        return aliases.get(backend.lower(), backend)
+
+    @classmethod
+    def _get_tracker_class(cls, backend: str):
+        """Get the tracker class for a backend.
+
+        Args:
+            backend: Canonical backend name
+
+        Returns:
+            Tracker class
+
+        Raises:
+            ValueError: If backend is not recognized
+        """
+        if backend == "mediapipe":
+            return MediaPipePoseTracker
+
+        if backend == "rtmpose-cpu":
+            try:
+                from kinemotion.core.rtmpose_cpu import OptimizedCPUTracker
+
+                return OptimizedCPUTracker
+            except ImportError as e:
+                raise ValueError(f"RTMPose CPU backend requested but not available: {e}") from e
+
+        if backend in ("rtmpose-cuda", "rtmpose-coreml"):
+            try:
+                from kinemotion.core.rtmpose_wrapper import RTMPoseWrapper
+
+                return RTMPoseWrapper
+            except ImportError as e:
+                raise ValueError(
+                    f"RTMPose wrapper backend requested but not available: {e}"
+                ) from e
+
+        raise ValueError(f"Unknown backend: {backend}")
+
+    @classmethod
+    def _create_tracker(
+        cls,
+        tracker_class: type,
+        backend: str,
+        mode: str,
+        kwargs: dict[str, object],
+    ):
+        """Create a tracker instance with appropriate arguments.
+
+        Args:
+            tracker_class: Tracker class to instantiate
+            backend: Backend name (for parameter mapping)
+            mode: RTMPose mode (only used for RTMPose backends)
+            kwargs: Additional arguments from user
+
+        Returns:
+            Tracker instance
+        """
+        # MediaPipe-specific arguments
+        if backend == "mediapipe":
+            # Remove RTMPose-specific arguments
+            rttmpose_keys = {"mode", "backend", "device", "pose_input_size"}
+            filtered_kwargs = {k: v for k, v in kwargs.items() if k not in rttmpose_keys}
+            return tracker_class(**filtered_kwargs)
+
+        # OptimizedCPUTracker (CPU-only, doesn't accept device parameter)
+        if backend == "rtmpose-cpu":
+            # Remove RTMPoseWrapper-specific and MediaPipe-specific arguments
+            unsupported_keys = {
+                "backend",
+                "device",
+                "min_detection_confidence",
+                "min_tracking_confidence",
+            }
+            filtered_kwargs = {k: v for k, v in kwargs.items() if k not in unsupported_keys}
+            filtered_kwargs.setdefault("mode", mode)
+            return tracker_class(**filtered_kwargs)
+
+        # RTMPoseWrapper (CUDA/CoreML, requires device parameter)
+        # Remove MediaPipe-specific arguments
+        mediapipe_keys = {"min_detection_confidence", "min_tracking_confidence"}
+        filtered_kwargs = {k: v for k, v in kwargs.items() if k not in mediapipe_keys}
+
+        device = backend.split("-")[-1]  # Extract 'cuda', 'cpu', 'coreml'
+        if device == "coreml":
+            device = "mps"  # RTMLib uses 'mps' for Apple Silicon
+
+        filtered_kwargs.setdefault("device", device)
+        filtered_kwargs.setdefault("mode", mode)
+
+        return tracker_class(**filtered_kwargs)
+
+    @classmethod
+    def get_available_backends(cls) -> list[str]:
+        """Get list of available backends on current system.
+
+        Returns:
+            List of available backend names
+        """
+        available = []
+
+        # Always have MediaPipe as fallback
+        try:
+            import mediapipe as _mp  # noqa: F401
+
+            _ = _mp  # Mark as intentionally used for availability check
+            available.append("mediapipe")
+        except ImportError:
+            pass
+
+        # Check RTMPose CPU
+        try:
+            from kinemotion.core.rtmpose_cpu import (
+                OptimizedCPUTracker as _RTMPoseCPU,
+            )  # type: ignore
+
+            _ = _RTMPoseCPU  # Mark as intentionally used for availability check
+
+            available.append("rtmpose-cpu")
+        except ImportError:
+            pass
+
+        # Check CUDA
+        try:
+            import torch
+
+            if torch.cuda.is_available():
+                from kinemotion.core.rtmpose_wrapper import (
+                    RTMPoseWrapper as _RTMPoseWrapper,
+                )  # type: ignore
+
+                _ = _RTMPoseWrapper  # Mark as intentionally used for availability check
+
+                available.append("rtmpose-cuda")
+        except ImportError:
+            pass
+
+        # Check CoreML (Apple Silicon)
+        import sys
+
+        if sys.platform == "darwin":
+            try:
+                from kinemotion.core.rtmpose_wrapper import (
+                    RTMPoseWrapper as _RTMPoseWrapperMPS,
+                )  # type: ignore
+
+                _ = _RTMPoseWrapperMPS  # Mark as intentionally used for availability check
+
+                available.append("rtmpose-coreml")
+            except ImportError:
+                pass
+
+        return available
+
+    @classmethod
+    def get_backend_info(cls, backend: str) -> dict[str, str]:
+        """Get information about a backend.
+
+        Args:
+            backend: Backend name
+
+        Returns:
+            Dictionary with backend information
+        """
+        info = {
+            "mediapipe": {
+                "name": "MediaPipe",
+                "description": "Baseline pose tracking using MediaPipe Tasks API",
+                "performance": "~48 FPS",
+                "accuracy": "Baseline (reference)",
+                "requirements": "mediapipe package",
+            },
+            "rtmpose-cpu": {
+                "name": "RTMPose CPU",
+                "description": "Optimized CPU implementation with ONNX Runtime",
+                "performance": "~40-68 FPS (134% of MediaPipe)",
+                "accuracy": "9-12px mean difference (1-5% metric accuracy)",
+                "requirements": "rtmlib package",
+            },
+            "rtmpose-cuda": {
+                "name": "RTMPose CUDA",
+                "description": "NVIDIA GPU acceleration with CUDA",
+                "performance": "~133 FPS (271% of MediaPipe)",
+                "accuracy": "9-12px mean difference (1-5% metric accuracy)",
+                "requirements": "rtmlib + CUDA-capable GPU",
+            },
+            "rtmpose-coreml": {
+                "name": "RTMPose CoreML",
+                "description": "Apple Silicon acceleration with CoreML",
+                "performance": "~42 FPS (94% of MediaPipe)",
+                "accuracy": "9-12px mean difference (1-5% metric accuracy)",
+                "requirements": "rtmlib + Apple Silicon",
+            },
+        }
+
+        normalized = cls._normalize_backend_name(backend)
+        return info.get(normalized, {})
+
+
+def get_tracker_info(tracker: object) -> str:
+    """Get detailed information about a pose tracker instance.
+
+    Args:
+        tracker: Pose tracker instance
+
+    Returns:
+        Formatted string with tracker details
+    """
+    tracker_class = type(tracker).__name__
+    module = type(tracker).__module__
+
+    info = f"{tracker_class} (from {module})"
+
+    # Add backend-specific details
+    if tracker_class == "MediaPipePoseTracker":
+        info += " [MediaPipe Tasks API]"
+    elif tracker_class == "OptimizedCPUTracker":
+        # Check if ONNX Runtime has CUDA
+        try:
+            import onnxruntime as ort
+
+            providers = ort.get_available_providers()
+            if "CUDAExecutionProvider" in providers:
+                # Check what providers the session is actually using
+                det_session = getattr(tracker, "det_session", None)
+                if det_session is not None:
+                    active_providers = det_session.get_providers()
+                    if "CUDAExecutionProvider" in active_providers:
+                        info += " [ONNX Runtime: CUDA]"
+                    else:
+                        info += " [ONNX Runtime: CPU]"
+                else:
+                    info += " [ONNX Runtime]"
+            else:
+                info += " [ONNX Runtime: CPU]"
+        except ImportError:
+            info += " [ONNX Runtime]"
+    elif tracker_class == "RTMPoseWrapper":
+        device = getattr(tracker, "device", None)
+        if device:
+            if device == "cuda":
+                try:
+                    import torch
+
+                    if torch.cuda.is_available():
+                        device_name = torch.cuda.get_device_name(0)
+                        info += f" [PyTorch CUDA: {device_name}]"
+                    else:
+                        info += " [PyTorch: CPU fallback]"
+                except ImportError:
+                    info += " [PyTorch CUDA]"
+            elif device == "mps":
+                info += " [PyTorch: Apple Silicon GPU]"
+            else:
+                info += f" [PyTorch: {device}]"
+        else:
+            info += " [PyTorch]"
+
+    return info
+
+
 def _extract_landmarks_from_results(
     pose_landmarks: mp.tasks.vision.components.containers.NormalizedLandmark,  # type: ignore[valid-type]
 ) -> dict[str, tuple[float, float, float]]: