nrl-tracker 1.9.2-py3-none-any.whl → 1.10.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.10.0.dist-info}/METADATA +47 -2
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.10.0.dist-info}/RECORD +14 -7
- pytcl/__init__.py +2 -2
- pytcl/core/optional_deps.py +20 -0
- pytcl/gpu/__init__.py +153 -0
- pytcl/gpu/ekf.py +425 -0
- pytcl/gpu/kalman.py +543 -0
- pytcl/gpu/matrix_utils.py +486 -0
- pytcl/gpu/particle_filter.py +568 -0
- pytcl/gpu/ukf.py +476 -0
- pytcl/gpu/utils.py +582 -0
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.10.0.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.10.0.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.10.0.dist-info}/top_level.txt +0 -0
pytcl/gpu/ekf.py
ADDED
@@ -0,0 +1,425 @@
"""
GPU-accelerated Extended Kalman Filter using CuPy.

This module provides GPU-accelerated implementations of the Extended Kalman
Filter (EKF) for batch processing of multiple tracks with nonlinear dynamics.

The EKF handles nonlinear systems by linearizing around the current estimate:
    x_k = f(x_{k-1}) + w    (nonlinear dynamics)
    z_k = h(x_k) + v        (nonlinear measurement)

Key Features
------------
- Batch processing of multiple tracks with same or different dynamics
- Support for user-provided Jacobian functions
- Numerical Jacobian computation when analytic unavailable
- Memory-efficient operations using CuPy

Examples
--------
>>> from pytcl.gpu.ekf import batch_ekf_predict, batch_ekf_update
>>> import numpy as np
>>>
>>> # Define nonlinear dynamics (on CPU, applied per-particle)
>>> def f_dynamics(x):
...     return np.array([x[0] + x[1], x[1] * 0.99])
>>>
>>> def F_jacobian(x):
...     return np.array([[1, 1], [0, 0.99]])
>>>
>>> # Batch prediction
>>> x_pred, P_pred = batch_ekf_predict(x, P, f_dynamics, F_jacobian, Q)
"""

from typing import Callable, NamedTuple, Optional

import numpy as np
from numpy.typing import ArrayLike, NDArray

from pytcl.core.optional_deps import import_optional, requires
from pytcl.gpu.utils import ensure_gpu_array, to_cpu


class BatchEKFPrediction(NamedTuple):
    """Result of batch EKF prediction.

    Attributes
    ----------
    x : ndarray
        Predicted state estimates, shape (n_tracks, state_dim).
    P : ndarray
        Predicted covariances, shape (n_tracks, state_dim, state_dim).
    """

    x: NDArray[np.floating]
    P: NDArray[np.floating]


class BatchEKFUpdate(NamedTuple):
    """Result of batch EKF update.

    Attributes
    ----------
    x : ndarray
        Updated state estimates.
    P : ndarray
        Updated covariances.
    y : ndarray
        Innovations.
    S : ndarray
        Innovation covariances.
    K : ndarray
        Kalman gains.
    likelihood : ndarray
        Measurement likelihoods.
    """

    x: NDArray[np.floating]
    P: NDArray[np.floating]
    y: NDArray[np.floating]
    S: NDArray[np.floating]
    K: NDArray[np.floating]
    likelihood: NDArray[np.floating]


def _compute_numerical_jacobian(
    f: Callable[[NDArray], NDArray],
    x: NDArray,
    eps: float = 1e-7,
) -> NDArray:
    """
    Compute numerical Jacobian using central differences.

    Parameters
    ----------
    f : callable
        Function to differentiate.
    x : ndarray
        Point at which to evaluate Jacobian.
    eps : float
        Finite difference step size.

    Returns
    -------
    J : ndarray
        Jacobian matrix, shape (output_dim, input_dim).
    """
    x = np.asarray(x).flatten()
    n = len(x)
    f0 = np.asarray(f(x)).flatten()
    m = len(f0)

    J = np.zeros((m, n))
    for i in range(n):
        x_plus = x.copy()
        x_minus = x.copy()
        x_plus[i] += eps
        x_minus[i] -= eps
        f_plus = np.asarray(f(x_plus)).flatten()
        f_minus = np.asarray(f(x_minus)).flatten()
        J[:, i] = (f_plus - f_minus) / (2 * eps)

    return J


@requires("cupy", extra="gpu", feature="GPU Extended Kalman filter")
def batch_ekf_predict(
    x: ArrayLike,
    P: ArrayLike,
    f: Callable[[NDArray], NDArray],
    F_jacobian: Optional[Callable[[NDArray], NDArray]],
    Q: ArrayLike,
) -> BatchEKFPrediction:
    """
    Batch EKF prediction for multiple tracks.

    Parameters
    ----------
    x : array_like
        Current state estimates, shape (n_tracks, state_dim).
    P : array_like
        Current covariances, shape (n_tracks, state_dim, state_dim).
    f : callable
        Nonlinear dynamics function f(x) -> x_next.
        Applied to each track's state vector.
    F_jacobian : callable or None
        Jacobian of dynamics df/dx. If None, computed numerically.
    Q : array_like
        Process noise covariance, shape (state_dim, state_dim)
        or (n_tracks, state_dim, state_dim).

    Returns
    -------
    result : BatchEKFPrediction
        Predicted states and covariances.

    Notes
    -----
    The nonlinear dynamics are applied on CPU (Python function), then
    covariance propagation is performed on GPU. This is efficient when
    the number of tracks is large relative to the cost of the dynamics.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU Extended Kalman filter")

    # Convert to numpy for dynamics evaluation
    x_np = np.asarray(x)
    P_gpu = ensure_gpu_array(P, dtype=cp.float64)
    Q_gpu = ensure_gpu_array(Q, dtype=cp.float64)

    n_tracks = x_np.shape[0]
    state_dim = x_np.shape[1]

    # Apply nonlinear dynamics to each track (on CPU)
    x_pred_np = np.zeros_like(x_np)
    F_matrices = np.zeros((n_tracks, state_dim, state_dim))

    for i in range(n_tracks):
        x_i = x_np[i]
        x_pred_np[i] = f(x_i)

        # Compute Jacobian
        if F_jacobian is not None:
            F_matrices[i] = F_jacobian(x_i)
        else:
            F_matrices[i] = _compute_numerical_jacobian(f, x_i)

    # Move to GPU
    x_pred_gpu = ensure_gpu_array(x_pred_np, dtype=cp.float64)
    F_gpu = ensure_gpu_array(F_matrices, dtype=cp.float64)

    # Handle Q dimensions
    if Q_gpu.ndim == 2:
        Q_batch = cp.broadcast_to(Q_gpu, (n_tracks, state_dim, state_dim))
    else:
        Q_batch = Q_gpu

    # Covariance prediction on GPU: P_pred = F @ P @ F' + Q
    FP = cp.einsum("nij,njk->nik", F_gpu, P_gpu)
    P_pred = cp.einsum("nij,nkj->nik", FP, F_gpu) + Q_batch

    # Ensure symmetry
    P_pred = (P_pred + cp.swapaxes(P_pred, -2, -1)) / 2

    return BatchEKFPrediction(x=x_pred_gpu, P=P_pred)


@requires("cupy", extra="gpu", feature="GPU Extended Kalman filter")
def batch_ekf_update(
    x: ArrayLike,
    P: ArrayLike,
    z: ArrayLike,
    h: Callable[[NDArray], NDArray],
    H_jacobian: Optional[Callable[[NDArray], NDArray]],
    R: ArrayLike,
) -> BatchEKFUpdate:
    """
    Batch EKF update for multiple tracks.

    Parameters
    ----------
    x : array_like
        Predicted state estimates, shape (n_tracks, state_dim).
    P : array_like
        Predicted covariances, shape (n_tracks, state_dim, state_dim).
    z : array_like
        Measurements, shape (n_tracks, meas_dim).
    h : callable
        Nonlinear measurement function h(x) -> z_predicted.
    H_jacobian : callable or None
        Jacobian of measurement function dh/dx. If None, computed numerically.
    R : array_like
        Measurement noise covariance.

    Returns
    -------
    result : BatchEKFUpdate
        Update results including states, covariances, and statistics.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU Extended Kalman filter")

    # Convert to numpy for measurement evaluation
    x_np = np.asarray(to_cpu(x))
    z_np = np.asarray(z)
    P_gpu = ensure_gpu_array(P, dtype=cp.float64)
    z_gpu = ensure_gpu_array(z, dtype=cp.float64)
    R_gpu = ensure_gpu_array(R, dtype=cp.float64)

    n_tracks = x_np.shape[0]
    state_dim = x_np.shape[1]
    meas_dim = z_np.shape[1]

    # Evaluate measurement function and Jacobian for each track
    z_pred_np = np.zeros((n_tracks, meas_dim))
    H_matrices = np.zeros((n_tracks, meas_dim, state_dim))

    for i in range(n_tracks):
        x_i = x_np[i]
        z_pred_np[i] = h(x_i)

        if H_jacobian is not None:
            H_matrices[i] = H_jacobian(x_i)
        else:
            H_matrices[i] = _compute_numerical_jacobian(h, x_i)

    # Move to GPU
    x_gpu = ensure_gpu_array(x_np, dtype=cp.float64)
    z_pred_gpu = ensure_gpu_array(z_pred_np, dtype=cp.float64)
    H_gpu = ensure_gpu_array(H_matrices, dtype=cp.float64)

    # Handle R dimensions
    if R_gpu.ndim == 2:
        R_batch = cp.broadcast_to(R_gpu, (n_tracks, meas_dim, meas_dim))
    else:
        R_batch = R_gpu

    # Innovation
    y = z_gpu - z_pred_gpu

    # Innovation covariance: S = H @ P @ H' + R
    HP = cp.einsum("nij,njk->nik", H_gpu, P_gpu)
    S = cp.einsum("nij,nkj->nik", HP, H_gpu) + R_batch

    # Kalman gain: K = P @ H' @ S^{-1}
    PHT = cp.einsum("nij,nkj->nik", P_gpu, H_gpu)
    S_inv = cp.linalg.inv(S)
    K = cp.einsum("nij,njk->nik", PHT, S_inv)

    # Updated state
    x_upd = x_gpu + cp.einsum("nij,nj->ni", K, y)

    # Updated covariance (Joseph form)
    eye = cp.eye(state_dim, dtype=cp.float64)
    I_KH = eye - cp.einsum("nij,njk->nik", K, H_gpu)
    P_upd = cp.einsum("nij,njk->nik", I_KH, P_gpu)
    P_upd = cp.einsum("nij,nkj->nik", P_upd, I_KH)
    KRK = cp.einsum("nij,njk,nlk->nil", K, R_batch, K)
    P_upd = P_upd + KRK

    # Ensure symmetry
    P_upd = (P_upd + cp.swapaxes(P_upd, -2, -1)) / 2

    # Likelihoods
    mahal_sq = cp.einsum("ni,nij,nj->n", y, S_inv, y)
    sign, logdet = cp.linalg.slogdet(S)
    log_likelihood = -0.5 * (mahal_sq + logdet + meas_dim * np.log(2 * np.pi))
    likelihood = cp.exp(log_likelihood)

    return BatchEKFUpdate(
        x=x_upd,
        P=P_upd,
        y=y,
        S=S,
        K=K,
        likelihood=likelihood,
    )


class CuPyExtendedKalmanFilter:
    """
    GPU-accelerated Extended Kalman Filter for batch processing.

    Parameters
    ----------
    state_dim : int
        Dimension of state vector.
    meas_dim : int
        Dimension of measurement vector.
    f : callable
        Nonlinear dynamics function f(x) -> x_next.
    h : callable
        Nonlinear measurement function h(x) -> z.
    F_jacobian : callable, optional
        Jacobian of dynamics. If None, computed numerically.
    H_jacobian : callable, optional
        Jacobian of measurement. If None, computed numerically.
    Q : array_like, optional
        Process noise covariance.
    R : array_like, optional
        Measurement noise covariance.

    Examples
    --------
    >>> import numpy as np
    >>> from pytcl.gpu.ekf import CuPyExtendedKalmanFilter
    >>>
    >>> # Nonlinear dynamics
    >>> def f(x):
    ...     return np.array([x[0] + x[1], x[1] * 0.99])
    >>>
    >>> def h(x):
    ...     return np.array([np.sqrt(x[0]**2 + x[1]**2)])
    >>>
    >>> ekf = CuPyExtendedKalmanFilter(
    ...     state_dim=2, meas_dim=1,
    ...     f=f, h=h,
    ...     Q=np.eye(2) * 0.01,
    ...     R=np.array([[0.1]]),
    ... )
    """

    @requires("cupy", extra="gpu", feature="GPU Extended Kalman filter")
    def __init__(
        self,
        state_dim: int,
        meas_dim: int,
        f: Callable[[NDArray], NDArray],
        h: Callable[[NDArray], NDArray],
        F_jacobian: Optional[Callable[[NDArray], NDArray]] = None,
        H_jacobian: Optional[Callable[[NDArray], NDArray]] = None,
        Q: Optional[ArrayLike] = None,
        R: Optional[ArrayLike] = None,
    ):
        cp = import_optional("cupy", extra="gpu", feature="GPU Extended Kalman filter")

        self.state_dim = state_dim
        self.meas_dim = meas_dim
        self.f = f
        self.h = h
        self.F_jacobian = F_jacobian
        self.H_jacobian = H_jacobian

        if Q is None:
            self.Q = cp.eye(state_dim, dtype=cp.float64) * 0.01
        else:
            self.Q = ensure_gpu_array(Q, dtype=cp.float64)

        if R is None:
            self.R = cp.eye(meas_dim, dtype=cp.float64)
        else:
            self.R = ensure_gpu_array(R, dtype=cp.float64)

    def predict(
        self,
        x: ArrayLike,
        P: ArrayLike,
    ) -> BatchEKFPrediction:
        """Perform batch EKF prediction."""
        return batch_ekf_predict(x, P, self.f, self.F_jacobian, self.Q)

    def update(
        self,
        x: ArrayLike,
        P: ArrayLike,
        z: ArrayLike,
    ) -> BatchEKFUpdate:
        """Perform batch EKF update."""
        return batch_ekf_update(x, P, z, self.h, self.H_jacobian, self.R)

    def predict_update(
        self,
        x: ArrayLike,
        P: ArrayLike,
        z: ArrayLike,
    ) -> BatchEKFUpdate:
        """Combined prediction and update."""
        pred = self.predict(x, P)
        return self.update(pred.x, pred.P, z)


__all__ = [
    "BatchEKFPrediction",
    "BatchEKFUpdate",
    "batch_ekf_predict",
    "batch_ekf_update",
    "CuPyExtendedKalmanFilter",
]