nrl-tracker 1.9.1__py3-none-any.whl → 1.10.0__py3-none-any.whl
This diff shows the content changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
- {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.10.0.dist-info}/METADATA +49 -4
- {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.10.0.dist-info}/RECORD +68 -60
- pytcl/__init__.py +2 -2
- pytcl/assignment_algorithms/gating.py +18 -0
- pytcl/assignment_algorithms/jpda.py +56 -0
- pytcl/assignment_algorithms/nd_assignment.py +65 -0
- pytcl/assignment_algorithms/network_flow.py +40 -0
- pytcl/astronomical/ephemerides.py +18 -0
- pytcl/astronomical/orbital_mechanics.py +131 -0
- pytcl/atmosphere/ionosphere.py +44 -0
- pytcl/atmosphere/models.py +29 -0
- pytcl/clustering/dbscan.py +9 -0
- pytcl/clustering/gaussian_mixture.py +20 -0
- pytcl/clustering/hierarchical.py +29 -0
- pytcl/clustering/kmeans.py +9 -0
- pytcl/coordinate_systems/conversions/geodetic.py +46 -0
- pytcl/coordinate_systems/conversions/spherical.py +35 -0
- pytcl/coordinate_systems/rotations/rotations.py +147 -0
- pytcl/core/__init__.py +16 -0
- pytcl/core/maturity.py +346 -0
- pytcl/core/optional_deps.py +20 -0
- pytcl/dynamic_estimation/gaussian_sum_filter.py +55 -0
- pytcl/dynamic_estimation/imm.py +29 -0
- pytcl/dynamic_estimation/information_filter.py +64 -0
- pytcl/dynamic_estimation/kalman/extended.py +56 -0
- pytcl/dynamic_estimation/kalman/linear.py +69 -0
- pytcl/dynamic_estimation/kalman/unscented.py +81 -0
- pytcl/dynamic_estimation/particle_filters/bootstrap.py +146 -0
- pytcl/dynamic_estimation/rbpf.py +51 -0
- pytcl/dynamic_estimation/smoothers.py +58 -0
- pytcl/dynamic_models/continuous_time/dynamics.py +104 -0
- pytcl/dynamic_models/discrete_time/coordinated_turn.py +6 -0
- pytcl/dynamic_models/discrete_time/singer.py +12 -0
- pytcl/dynamic_models/process_noise/coordinated_turn.py +46 -0
- pytcl/dynamic_models/process_noise/polynomial.py +6 -0
- pytcl/dynamic_models/process_noise/singer.py +52 -0
- pytcl/gpu/__init__.py +153 -0
- pytcl/gpu/ekf.py +425 -0
- pytcl/gpu/kalman.py +543 -0
- pytcl/gpu/matrix_utils.py +486 -0
- pytcl/gpu/particle_filter.py +568 -0
- pytcl/gpu/ukf.py +476 -0
- pytcl/gpu/utils.py +582 -0
- pytcl/gravity/clenshaw.py +60 -0
- pytcl/gravity/egm.py +47 -0
- pytcl/gravity/models.py +34 -0
- pytcl/gravity/spherical_harmonics.py +73 -0
- pytcl/gravity/tides.py +34 -0
- pytcl/mathematical_functions/numerical_integration/quadrature.py +85 -0
- pytcl/mathematical_functions/special_functions/bessel.py +55 -0
- pytcl/mathematical_functions/special_functions/elliptic.py +42 -0
- pytcl/mathematical_functions/special_functions/error_functions.py +49 -0
- pytcl/mathematical_functions/special_functions/gamma_functions.py +43 -0
- pytcl/mathematical_functions/special_functions/lambert_w.py +5 -0
- pytcl/mathematical_functions/special_functions/marcum_q.py +16 -0
- pytcl/navigation/geodesy.py +101 -2
- pytcl/navigation/great_circle.py +71 -0
- pytcl/navigation/rhumb.py +74 -0
- pytcl/performance_evaluation/estimation_metrics.py +70 -0
- pytcl/performance_evaluation/track_metrics.py +30 -0
- pytcl/static_estimation/maximum_likelihood.py +54 -0
- pytcl/static_estimation/robust.py +57 -0
- pytcl/terrain/dem.py +69 -0
- pytcl/terrain/visibility.py +65 -0
- pytcl/trackers/hypothesis.py +65 -0
- {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.10.0.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.10.0.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.9.1.dist-info → nrl_tracker-1.10.0.dist-info}/top_level.txt +0 -0
pytcl/gpu/__init__.py
ADDED
@@ -0,0 +1,153 @@
```python
"""
GPU-accelerated algorithms for the Tracker Component Library.

This module provides GPU-accelerated implementations of key tracking algorithms
using CuPy (NVIDIA GPUs) or MLX (Apple Silicon). These implementations offer
significant speedups (5-15x) for batch processing of multiple tracks or large
particle sets.

The module automatically selects the best available backend:
- On Apple Silicon (M1/M2/M3): Uses MLX if installed
- On systems with NVIDIA GPUs: Uses CuPy if installed
- Falls back to CPU (numpy) if no GPU backend is available

The GPU implementations mirror the CPU API but accept GPU arrays and return
GPU arrays. Use the utility functions to seamlessly transfer data between
CPU and GPU.

Requirements
------------
For NVIDIA GPUs:
- CUDA-capable GPU
- CuPy >= 12.0

For Apple Silicon:
- macOS with Apple Silicon (M1, M2, M3, etc.)
- MLX >= 0.5.0

Installation
------------
For NVIDIA CUDA:
    pip install pytcl[gpu]
    # or directly:
    pip install cupy-cuda12x  # For CUDA 12.x

For Apple Silicon:
    pip install pytcl[gpu-apple]
    # or directly:
    pip install mlx

Examples
--------
Basic usage with automatic backend selection:

>>> from pytcl.gpu import is_gpu_available, get_backend
>>> if is_gpu_available():
...     print(f"GPU available, using {get_backend()} backend")

Check platform:

>>> from pytcl.gpu import is_apple_silicon, is_mlx_available
>>> if is_apple_silicon():
...     print("Running on Apple Silicon")
>>> if is_mlx_available():
...     print("MLX acceleration available")

Batch processing example:

>>> from pytcl.gpu import batch_kf_predict, to_gpu, to_cpu
>>> # Move data to GPU (automatically uses best backend)
>>> x_gpu = to_gpu(x_batch)  # (n_tracks, state_dim)
>>> P_gpu = to_gpu(P_batch)  # (n_tracks, state_dim, state_dim)
>>> # Batch prediction
>>> x_pred, P_pred = batch_kf_predict(x_gpu, P_gpu, F, Q)
>>> # Move results back to CPU
>>> x_pred_cpu = to_cpu(x_pred)

See Also
--------
pytcl.dynamic_estimation.kalman : CPU Kalman filter implementations
pytcl.dynamic_estimation.particle_filters : CPU particle filter implementations
"""

from pytcl.gpu.utils import (
    get_array_module,
    get_backend,
    is_apple_silicon,
    is_cupy_available,
    is_gpu_available,
    is_mlx_available,
    to_cpu,
    to_gpu,
)

__all__ = [
    # Platform detection
    "is_apple_silicon",
    "is_mlx_available",
    "is_cupy_available",
    "get_backend",
    # Availability check
    "is_gpu_available",
    # Utility functions
    "get_array_module",
    "to_gpu",
    "to_cpu",
]


# Lazy imports for GPU implementations (only loaded if CuPy is available)
def __getattr__(name: str):
    """Lazy import GPU implementations."""
    if name in ("CuPyKalmanFilter", "batch_kf_predict", "batch_kf_update"):
        from pytcl.gpu.kalman import CuPyKalmanFilter, batch_kf_predict, batch_kf_update

        globals()[name] = locals()[name]
        return locals()[name]

    if name in ("CuPyExtendedKalmanFilter", "batch_ekf_predict", "batch_ekf_update"):
        from pytcl.gpu.ekf import (
            CuPyExtendedKalmanFilter,
            batch_ekf_predict,
            batch_ekf_update,
        )

        globals()[name] = locals()[name]
        return locals()[name]

    if name in ("CuPyUnscentedKalmanFilter", "batch_ukf_predict", "batch_ukf_update"):
        from pytcl.gpu.ukf import (
            CuPyUnscentedKalmanFilter,
            batch_ukf_predict,
            batch_ukf_update,
        )

        globals()[name] = locals()[name]
        return locals()[name]

    if name in (
        "CuPyParticleFilter",
        "gpu_resample_systematic",
        "gpu_resample_multinomial",
    ):
        from pytcl.gpu.particle_filter import (
            CuPyParticleFilter,
            gpu_resample_multinomial,
            gpu_resample_systematic,
        )

        globals()[name] = locals()[name]
        return locals()[name]

    if name in ("gpu_cholesky", "gpu_qr", "gpu_solve", "MemoryPool"):
        from pytcl.gpu.matrix_utils import (
            MemoryPool,
            gpu_cholesky,
            gpu_qr,
            gpu_solve,
        )

        globals()[name] = locals()[name]
        return locals()[name]

    raise AttributeError(f"module 'pytcl.gpu' has no attribute '{name}'")
```
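Because of the module-level `__getattr__` hook above (PEP 562), `import pytcl.gpu` only loads the lightweight `pytcl.gpu.utils` module; names such as `CuPyKalmanFilter` or `gpu_cholesky` are resolved, and their CuPy-backed submodules imported, on first attribute access and then cached in the module namespace. A minimal usage sketch, assuming a CuPy-capable environment:

```python
import pytcl.gpu as gpu

# Importing pytcl.gpu does not pull in CuPy; only pytcl.gpu.utils is loaded.
print(gpu.get_backend())  # name of the selected backend

if gpu.is_cupy_available():
    # First access triggers the lazy import inside __getattr__ ...
    chol = gpu.gpu_cholesky
    # ... after which the name is cached in the module namespace.
    assert "gpu_cholesky" in vars(gpu)
```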
pytcl/gpu/ekf.py
ADDED
@@ -0,0 +1,425 @@
```python
"""
GPU-accelerated Extended Kalman Filter using CuPy.

This module provides GPU-accelerated implementations of the Extended Kalman
Filter (EKF) for batch processing of multiple tracks with nonlinear dynamics.

The EKF handles nonlinear systems by linearizing around the current estimate:
    x_k = f(x_{k-1}) + w    (nonlinear dynamics)
    z_k = h(x_k) + v        (nonlinear measurement)

Key Features
------------
- Batch processing of multiple tracks with same or different dynamics
- Support for user-provided Jacobian functions
- Numerical Jacobian computation when analytic unavailable
- Memory-efficient operations using CuPy

Examples
--------
>>> from pytcl.gpu.ekf import batch_ekf_predict, batch_ekf_update
>>> import numpy as np
>>>
>>> # Define nonlinear dynamics (on CPU, applied per-particle)
>>> def f_dynamics(x):
...     return np.array([x[0] + x[1], x[1] * 0.99])
>>>
>>> def F_jacobian(x):
...     return np.array([[1, 1], [0, 0.99]])
>>>
>>> # Batch prediction
>>> x_pred, P_pred = batch_ekf_predict(x, P, f_dynamics, F_jacobian, Q)
"""

from typing import Callable, NamedTuple, Optional

import numpy as np
from numpy.typing import ArrayLike, NDArray

from pytcl.core.optional_deps import import_optional, requires
from pytcl.gpu.utils import ensure_gpu_array, to_cpu


class BatchEKFPrediction(NamedTuple):
    """Result of batch EKF prediction.

    Attributes
    ----------
    x : ndarray
        Predicted state estimates, shape (n_tracks, state_dim).
    P : ndarray
        Predicted covariances, shape (n_tracks, state_dim, state_dim).
    """

    x: NDArray[np.floating]
    P: NDArray[np.floating]


class BatchEKFUpdate(NamedTuple):
    """Result of batch EKF update.

    Attributes
    ----------
    x : ndarray
        Updated state estimates.
    P : ndarray
        Updated covariances.
    y : ndarray
        Innovations.
    S : ndarray
        Innovation covariances.
    K : ndarray
        Kalman gains.
    likelihood : ndarray
        Measurement likelihoods.
    """

    x: NDArray[np.floating]
    P: NDArray[np.floating]
    y: NDArray[np.floating]
    S: NDArray[np.floating]
    K: NDArray[np.floating]
    likelihood: NDArray[np.floating]


def _compute_numerical_jacobian(
    f: Callable[[NDArray], NDArray],
    x: NDArray,
    eps: float = 1e-7,
) -> NDArray:
    """
    Compute numerical Jacobian using central differences.

    Parameters
    ----------
    f : callable
        Function to differentiate.
    x : ndarray
        Point at which to evaluate Jacobian.
    eps : float
        Finite difference step size.

    Returns
    -------
    J : ndarray
        Jacobian matrix, shape (output_dim, input_dim).
    """
    x = np.asarray(x).flatten()
    n = len(x)
    f0 = np.asarray(f(x)).flatten()
    m = len(f0)

    J = np.zeros((m, n))
    for i in range(n):
        x_plus = x.copy()
        x_minus = x.copy()
        x_plus[i] += eps
        x_minus[i] -= eps
        f_plus = np.asarray(f(x_plus)).flatten()
        f_minus = np.asarray(f(x_minus)).flatten()
        J[:, i] = (f_plus - f_minus) / (2 * eps)

    return J


@requires("cupy", extra="gpu", feature="GPU Extended Kalman filter")
def batch_ekf_predict(
    x: ArrayLike,
    P: ArrayLike,
    f: Callable[[NDArray], NDArray],
    F_jacobian: Optional[Callable[[NDArray], NDArray]],
    Q: ArrayLike,
) -> BatchEKFPrediction:
    """
    Batch EKF prediction for multiple tracks.

    Parameters
    ----------
    x : array_like
        Current state estimates, shape (n_tracks, state_dim).
    P : array_like
        Current covariances, shape (n_tracks, state_dim, state_dim).
    f : callable
        Nonlinear dynamics function f(x) -> x_next.
        Applied to each track's state vector.
    F_jacobian : callable or None
        Jacobian of dynamics df/dx. If None, computed numerically.
    Q : array_like
        Process noise covariance, shape (state_dim, state_dim)
        or (n_tracks, state_dim, state_dim).

    Returns
    -------
    result : BatchEKFPrediction
        Predicted states and covariances.

    Notes
    -----
    The nonlinear dynamics are applied on CPU (Python function), then
    covariance propagation is performed on GPU. This is efficient when
    the number of tracks is large relative to the cost of the dynamics.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU Extended Kalman filter")

    # Convert to numpy for dynamics evaluation
    x_np = np.asarray(x)
    P_gpu = ensure_gpu_array(P, dtype=cp.float64)
    Q_gpu = ensure_gpu_array(Q, dtype=cp.float64)

    n_tracks = x_np.shape[0]
    state_dim = x_np.shape[1]

    # Apply nonlinear dynamics to each track (on CPU)
    x_pred_np = np.zeros_like(x_np)
    F_matrices = np.zeros((n_tracks, state_dim, state_dim))

    for i in range(n_tracks):
        x_i = x_np[i]
        x_pred_np[i] = f(x_i)

        # Compute Jacobian
        if F_jacobian is not None:
            F_matrices[i] = F_jacobian(x_i)
        else:
            F_matrices[i] = _compute_numerical_jacobian(f, x_i)

    # Move to GPU
    x_pred_gpu = ensure_gpu_array(x_pred_np, dtype=cp.float64)
    F_gpu = ensure_gpu_array(F_matrices, dtype=cp.float64)

    # Handle Q dimensions
    if Q_gpu.ndim == 2:
        Q_batch = cp.broadcast_to(Q_gpu, (n_tracks, state_dim, state_dim))
    else:
        Q_batch = Q_gpu

    # Covariance prediction on GPU: P_pred = F @ P @ F' + Q
    FP = cp.einsum("nij,njk->nik", F_gpu, P_gpu)
    P_pred = cp.einsum("nij,nkj->nik", FP, F_gpu) + Q_batch

    # Ensure symmetry
    P_pred = (P_pred + cp.swapaxes(P_pred, -2, -1)) / 2

    return BatchEKFPrediction(x=x_pred_gpu, P=P_pred)


@requires("cupy", extra="gpu", feature="GPU Extended Kalman filter")
def batch_ekf_update(
    x: ArrayLike,
    P: ArrayLike,
    z: ArrayLike,
    h: Callable[[NDArray], NDArray],
    H_jacobian: Optional[Callable[[NDArray], NDArray]],
    R: ArrayLike,
) -> BatchEKFUpdate:
    """
    Batch EKF update for multiple tracks.

    Parameters
    ----------
    x : array_like
        Predicted state estimates, shape (n_tracks, state_dim).
    P : array_like
        Predicted covariances, shape (n_tracks, state_dim, state_dim).
    z : array_like
        Measurements, shape (n_tracks, meas_dim).
    h : callable
        Nonlinear measurement function h(x) -> z_predicted.
    H_jacobian : callable or None
        Jacobian of measurement function dh/dx. If None, computed numerically.
    R : array_like
        Measurement noise covariance.

    Returns
    -------
    result : BatchEKFUpdate
        Update results including states, covariances, and statistics.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU Extended Kalman filter")

    # Convert to numpy for measurement evaluation
    x_np = np.asarray(to_cpu(x))
    z_np = np.asarray(z)
    P_gpu = ensure_gpu_array(P, dtype=cp.float64)
    z_gpu = ensure_gpu_array(z, dtype=cp.float64)
    R_gpu = ensure_gpu_array(R, dtype=cp.float64)

    n_tracks = x_np.shape[0]
    state_dim = x_np.shape[1]
    meas_dim = z_np.shape[1]

    # Evaluate measurement function and Jacobian for each track
    z_pred_np = np.zeros((n_tracks, meas_dim))
    H_matrices = np.zeros((n_tracks, meas_dim, state_dim))

    for i in range(n_tracks):
        x_i = x_np[i]
        z_pred_np[i] = h(x_i)

        if H_jacobian is not None:
            H_matrices[i] = H_jacobian(x_i)
        else:
            H_matrices[i] = _compute_numerical_jacobian(h, x_i)

    # Move to GPU
    x_gpu = ensure_gpu_array(x_np, dtype=cp.float64)
    z_pred_gpu = ensure_gpu_array(z_pred_np, dtype=cp.float64)
    H_gpu = ensure_gpu_array(H_matrices, dtype=cp.float64)

    # Handle R dimensions
    if R_gpu.ndim == 2:
        R_batch = cp.broadcast_to(R_gpu, (n_tracks, meas_dim, meas_dim))
    else:
        R_batch = R_gpu

    # Innovation
    y = z_gpu - z_pred_gpu

    # Innovation covariance: S = H @ P @ H' + R
    HP = cp.einsum("nij,njk->nik", H_gpu, P_gpu)
    S = cp.einsum("nij,nkj->nik", HP, H_gpu) + R_batch

    # Kalman gain: K = P @ H' @ S^{-1}
    PHT = cp.einsum("nij,nkj->nik", P_gpu, H_gpu)
    S_inv = cp.linalg.inv(S)
    K = cp.einsum("nij,njk->nik", PHT, S_inv)

    # Updated state
    x_upd = x_gpu + cp.einsum("nij,nj->ni", K, y)

    # Updated covariance (Joseph form)
    eye = cp.eye(state_dim, dtype=cp.float64)
    I_KH = eye - cp.einsum("nij,njk->nik", K, H_gpu)
    P_upd = cp.einsum("nij,njk->nik", I_KH, P_gpu)
    P_upd = cp.einsum("nij,nkj->nik", P_upd, I_KH)
    KRK = cp.einsum("nij,njk,nlk->nil", K, R_batch, K)
    P_upd = P_upd + KRK

    # Ensure symmetry
    P_upd = (P_upd + cp.swapaxes(P_upd, -2, -1)) / 2

    # Likelihoods
    mahal_sq = cp.einsum("ni,nij,nj->n", y, S_inv, y)
    sign, logdet = cp.linalg.slogdet(S)
    log_likelihood = -0.5 * (mahal_sq + logdet + meas_dim * np.log(2 * np.pi))
    likelihood = cp.exp(log_likelihood)

    return BatchEKFUpdate(
        x=x_upd,
        P=P_upd,
        y=y,
        S=S,
        K=K,
        likelihood=likelihood,
    )


class CuPyExtendedKalmanFilter:
    """
    GPU-accelerated Extended Kalman Filter for batch processing.

    Parameters
    ----------
    state_dim : int
        Dimension of state vector.
    meas_dim : int
        Dimension of measurement vector.
    f : callable
        Nonlinear dynamics function f(x) -> x_next.
    h : callable
        Nonlinear measurement function h(x) -> z.
    F_jacobian : callable, optional
        Jacobian of dynamics. If None, computed numerically.
    H_jacobian : callable, optional
        Jacobian of measurement. If None, computed numerically.
    Q : array_like, optional
        Process noise covariance.
    R : array_like, optional
        Measurement noise covariance.

    Examples
    --------
    >>> import numpy as np
    >>> from pytcl.gpu.ekf import CuPyExtendedKalmanFilter
    >>>
    >>> # Nonlinear dynamics
    >>> def f(x):
    ...     return np.array([x[0] + x[1], x[1] * 0.99])
    >>>
    >>> def h(x):
    ...     return np.array([np.sqrt(x[0]**2 + x[1]**2)])
    >>>
    >>> ekf = CuPyExtendedKalmanFilter(
    ...     state_dim=2, meas_dim=1,
    ...     f=f, h=h,
    ...     Q=np.eye(2) * 0.01,
    ...     R=np.array([[0.1]]),
    ... )
    """

    @requires("cupy", extra="gpu", feature="GPU Extended Kalman filter")
    def __init__(
        self,
        state_dim: int,
        meas_dim: int,
        f: Callable[[NDArray], NDArray],
        h: Callable[[NDArray], NDArray],
        F_jacobian: Optional[Callable[[NDArray], NDArray]] = None,
        H_jacobian: Optional[Callable[[NDArray], NDArray]] = None,
        Q: Optional[ArrayLike] = None,
        R: Optional[ArrayLike] = None,
    ):
        cp = import_optional("cupy", extra="gpu", feature="GPU Extended Kalman filter")

        self.state_dim = state_dim
        self.meas_dim = meas_dim
        self.f = f
        self.h = h
        self.F_jacobian = F_jacobian
        self.H_jacobian = H_jacobian

        if Q is None:
            self.Q = cp.eye(state_dim, dtype=cp.float64) * 0.01
        else:
            self.Q = ensure_gpu_array(Q, dtype=cp.float64)

        if R is None:
            self.R = cp.eye(meas_dim, dtype=cp.float64)
        else:
            self.R = ensure_gpu_array(R, dtype=cp.float64)

    def predict(
        self,
        x: ArrayLike,
        P: ArrayLike,
    ) -> BatchEKFPrediction:
        """Perform batch EKF prediction."""
        return batch_ekf_predict(x, P, self.f, self.F_jacobian, self.Q)

    def update(
        self,
        x: ArrayLike,
        P: ArrayLike,
        z: ArrayLike,
    ) -> BatchEKFUpdate:
        """Perform batch EKF update."""
        return batch_ekf_update(x, P, z, self.h, self.H_jacobian, self.R)

    def predict_update(
        self,
        x: ArrayLike,
        P: ArrayLike,
        z: ArrayLike,
    ) -> BatchEKFUpdate:
        """Combined prediction and update."""
        pred = self.predict(x, P)
        return self.update(pred.x, pred.P, z)


__all__ = [
    "BatchEKFPrediction",
    "BatchEKFUpdate",
    "batch_ekf_predict",
    "batch_ekf_update",
    "CuPyExtendedKalmanFilter",
]
```