nrl-tracker 1.9.2__py3-none-any.whl → 1.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.10.0.dist-info}/METADATA +47 -2
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.10.0.dist-info}/RECORD +14 -7
- pytcl/__init__.py +2 -2
- pytcl/core/optional_deps.py +20 -0
- pytcl/gpu/__init__.py +153 -0
- pytcl/gpu/ekf.py +425 -0
- pytcl/gpu/kalman.py +543 -0
- pytcl/gpu/matrix_utils.py +486 -0
- pytcl/gpu/particle_filter.py +568 -0
- pytcl/gpu/ukf.py +476 -0
- pytcl/gpu/utils.py +582 -0
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.10.0.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.10.0.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.10.0.dist-info}/top_level.txt +0 -0
pytcl/gpu/ukf.py
ADDED
@@ -0,0 +1,476 @@

"""
GPU-accelerated Unscented Kalman Filter using CuPy.

This module provides GPU-accelerated implementations of the Unscented Kalman
Filter (UKF) for batch processing of multiple tracks with nonlinear dynamics.

The UKF uses sigma points to propagate uncertainty through nonlinear functions
without requiring Jacobian computation.

Key Features
------------
- Batch processing of multiple tracks
- Configurable sigma point parameters (alpha, beta, kappa)
- GPU-accelerated sigma point generation and transformation
- Support for nonlinear dynamics and measurements

Examples
--------
>>> from pytcl.gpu.ukf import batch_ukf_predict
>>> import numpy as np
>>>
>>> def f_dynamics(x):
...     return np.array([x[0] + x[1], x[1] * 0.99])
>>>
>>> x_pred, P_pred = batch_ukf_predict(x, P, f_dynamics, Q)
"""

from typing import Callable, NamedTuple, Optional, Tuple

import numpy as np
from numpy.typing import ArrayLike, NDArray

from pytcl.core.optional_deps import import_optional, requires
from pytcl.gpu.utils import ensure_gpu_array, to_cpu


class BatchUKFPrediction(NamedTuple):
    """Result of batch UKF prediction.

    Attributes
    ----------
    x : ndarray
        Predicted state estimates, shape (n_tracks, state_dim).
    P : ndarray
        Predicted covariances, shape (n_tracks, state_dim, state_dim).
    """

    x: NDArray[np.floating]
    P: NDArray[np.floating]


class BatchUKFUpdate(NamedTuple):
    """Result of batch UKF update.

    Attributes
    ----------
    x : ndarray
        Updated state estimates.
    P : ndarray
        Updated covariances.
    y : ndarray
        Innovations.
    S : ndarray
        Innovation covariances.
    likelihood : ndarray
        Measurement likelihoods.
    """

    x: NDArray[np.floating]
    P: NDArray[np.floating]
    y: NDArray[np.floating]
    S: NDArray[np.floating]
    likelihood: NDArray[np.floating]


def _compute_sigma_weights(
    n: int,
    alpha: float = 1e-3,
    beta: float = 2.0,
    kappa: float = 0.0,
) -> Tuple[NDArray, NDArray]:
    """
    Compute UKF sigma point weights (Merwe scaled sigma points).

    Parameters
    ----------
    n : int
        State dimension.
    alpha : float
        Spread of sigma points (1e-4 to 1).
    beta : float
        Prior knowledge (2 is optimal for Gaussian).
    kappa : float
        Secondary scaling parameter (0 or 3-n).

    Returns
    -------
    Wm : ndarray
        Mean weights, shape (2n+1,).
    Wc : ndarray
        Covariance weights, shape (2n+1,).
    """
    lambda_ = alpha**2 * (n + kappa) - n

    # Weight for mean: first point
    Wm = np.full(2 * n + 1, 1 / (2 * (n + lambda_)))
    Wm[0] = lambda_ / (n + lambda_)

    # Weight for covariance
    Wc = Wm.copy()
    Wc[0] = Wm[0] + (1 - alpha**2 + beta)

    return Wm, Wc


@requires("cupy", extra="gpu", feature="GPU Unscented Kalman filter")
def _generate_sigma_points(
    x: ArrayLike,
    P: ArrayLike,
    alpha: float = 1e-3,
    kappa: float = 0.0,
) -> NDArray:
    """
    Generate sigma points for batch of tracks.

    Parameters
    ----------
    x : array_like
        State estimates, shape (n_tracks, state_dim).
    P : array_like
        Covariances, shape (n_tracks, state_dim, state_dim).
    alpha : float
        Spread parameter.
    kappa : float
        Secondary scaling.

    Returns
    -------
    sigma_points : ndarray
        Sigma points, shape (n_tracks, 2*state_dim+1, state_dim).
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU Unscented Kalman filter")

    x_gpu = ensure_gpu_array(x, dtype=cp.float64)
    P_gpu = ensure_gpu_array(P, dtype=cp.float64)

    n_tracks = x_gpu.shape[0]
    n = x_gpu.shape[1]  # state dim
    n_sigma = 2 * n + 1

    lambda_ = alpha**2 * (n + kappa) - n
    gamma = cp.sqrt(n + lambda_)

    # Cholesky decomposition of P
    # CuPy's cholesky returns lower triangular
    try:
        L = cp.linalg.cholesky(P_gpu)
    except cp.linalg.LinAlgError:
        # Fallback: eigendecomposition for non-positive-definite
        eigvals, eigvecs = cp.linalg.eigh(P_gpu)
        eigvals = cp.maximum(eigvals, 1e-10)
        L = eigvecs @ cp.diag(cp.sqrt(eigvals)).T
        L = cp.swapaxes(L, -2, -1)  # Make it "lower triangular-like"

    # Scale by gamma
    scaled_L = gamma * L  # shape: (n_tracks, n, n)

    # Generate sigma points
    sigma_points = cp.zeros((n_tracks, n_sigma, n), dtype=cp.float64)

    # First point is the mean
    sigma_points[:, 0, :] = x_gpu

    # Remaining points: x ± scaled_L columns
    for i in range(n):
        sigma_points[:, i + 1, :] = x_gpu + scaled_L[:, :, i]
        sigma_points[:, n + i + 1, :] = x_gpu - scaled_L[:, :, i]

    return sigma_points


@requires("cupy", extra="gpu", feature="GPU Unscented Kalman filter")
def batch_ukf_predict(
    x: ArrayLike,
    P: ArrayLike,
    f: Callable[[NDArray], NDArray],
    Q: ArrayLike,
    alpha: float = 1e-3,
    beta: float = 2.0,
    kappa: float = 0.0,
) -> BatchUKFPrediction:
    """
    Batch UKF prediction for multiple tracks.

    Parameters
    ----------
    x : array_like
        Current state estimates, shape (n_tracks, state_dim).
    P : array_like
        Current covariances, shape (n_tracks, state_dim, state_dim).
    f : callable
        Nonlinear dynamics function f(x) -> x_next.
    Q : array_like
        Process noise covariance.
    alpha, beta, kappa : float
        Sigma point parameters.

    Returns
    -------
    result : BatchUKFPrediction
        Predicted states and covariances.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU Unscented Kalman filter")

    x_gpu = ensure_gpu_array(x, dtype=cp.float64)
    P_gpu = ensure_gpu_array(P, dtype=cp.float64)
    Q_gpu = ensure_gpu_array(Q, dtype=cp.float64)

    n_tracks = x_gpu.shape[0]
    n = x_gpu.shape[1]
    n_sigma = 2 * n + 1

    # Generate sigma points
    sigma_points = _generate_sigma_points(x_gpu, P_gpu, alpha, kappa)

    # Compute weights
    Wm, Wc = _compute_sigma_weights(n, alpha, beta, kappa)
    Wm_gpu = ensure_gpu_array(Wm, dtype=cp.float64)
    Wc_gpu = ensure_gpu_array(Wc, dtype=cp.float64)

    # Propagate sigma points through dynamics (on CPU)
    sigma_np = to_cpu(sigma_points)
    sigma_pred_np = np.zeros_like(sigma_np)

    for i in range(n_tracks):
        for j in range(n_sigma):
            sigma_pred_np[i, j] = f(sigma_np[i, j])

    sigma_pred = ensure_gpu_array(sigma_pred_np, dtype=cp.float64)

    # Predicted mean: sum of weighted sigma points
    x_pred = cp.einsum("j,nj...->n...", Wm_gpu, sigma_pred)

    # Predicted covariance
    diff = sigma_pred - x_pred[:, None, :]  # (n_tracks, n_sigma, n)
    P_pred = cp.einsum("j,nji,njk->nik", Wc_gpu, diff, diff)

    # Add process noise
    if Q_gpu.ndim == 2:
        P_pred = P_pred + Q_gpu
    else:
        P_pred = P_pred + Q_gpu

    # Ensure symmetry
    P_pred = (P_pred + cp.swapaxes(P_pred, -2, -1)) / 2

    return BatchUKFPrediction(x=x_pred, P=P_pred)


@requires("cupy", extra="gpu", feature="GPU Unscented Kalman filter")
def batch_ukf_update(
    x: ArrayLike,
    P: ArrayLike,
    z: ArrayLike,
    h: Callable[[NDArray], NDArray],
    R: ArrayLike,
    alpha: float = 1e-3,
    beta: float = 2.0,
    kappa: float = 0.0,
) -> BatchUKFUpdate:
    """
    Batch UKF update for multiple tracks.

    Parameters
    ----------
    x : array_like
        Predicted state estimates, shape (n_tracks, state_dim).
    P : array_like
        Predicted covariances, shape (n_tracks, state_dim, state_dim).
    z : array_like
        Measurements, shape (n_tracks, meas_dim).
    h : callable
        Nonlinear measurement function h(x) -> z.
    R : array_like
        Measurement noise covariance.
    alpha, beta, kappa : float
        Sigma point parameters.

    Returns
    -------
    result : BatchUKFUpdate
        Update results.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU Unscented Kalman filter")

    x_gpu = ensure_gpu_array(x, dtype=cp.float64)
    P_gpu = ensure_gpu_array(P, dtype=cp.float64)
    z_gpu = ensure_gpu_array(z, dtype=cp.float64)
    R_gpu = ensure_gpu_array(R, dtype=cp.float64)

    n_tracks = x_gpu.shape[0]
    n = x_gpu.shape[1]
    m = z_gpu.shape[1]
    n_sigma = 2 * n + 1

    # Generate sigma points
    sigma_points = _generate_sigma_points(x_gpu, P_gpu, alpha, kappa)

    # Compute weights
    Wm, Wc = _compute_sigma_weights(n, alpha, beta, kappa)
    Wm_gpu = ensure_gpu_array(Wm, dtype=cp.float64)
    Wc_gpu = ensure_gpu_array(Wc, dtype=cp.float64)

    # Transform sigma points through measurement function (on CPU)
    sigma_np = to_cpu(sigma_points)
    gamma_np = np.zeros((n_tracks, n_sigma, m))

    for i in range(n_tracks):
        for j in range(n_sigma):
            gamma_np[i, j] = h(sigma_np[i, j])

    gamma = ensure_gpu_array(gamma_np, dtype=cp.float64)

    # Predicted measurement: weighted sum
    z_pred = cp.einsum("j,njk->nk", Wm_gpu, gamma)

    # Innovation
    y = z_gpu - z_pred

    # Innovation covariance
    z_diff = gamma - z_pred[:, None, :]  # (n_tracks, n_sigma, m)
    S = cp.einsum("j,nji,njk->nik", Wc_gpu, z_diff, z_diff)

    # Add measurement noise
    if R_gpu.ndim == 2:
        S = S + R_gpu
    else:
        S = S + R_gpu

    # Cross covariance
    x_np = to_cpu(x_gpu)
    x_diff = sigma_np - x_np[:, None, :]  # On CPU
    x_diff_gpu = ensure_gpu_array(x_diff, dtype=cp.float64)

    Pxz = cp.einsum("j,nji,njk->nik", Wc_gpu, x_diff_gpu, z_diff)

    # Kalman gain
    S_inv = cp.linalg.inv(S)
    K = cp.einsum("nij,njk->nik", Pxz, S_inv)

    # Updated state
    x_upd = x_gpu + cp.einsum("nij,nj->ni", K, y)

    # Updated covariance
    P_upd = P_gpu - cp.einsum("nij,njk,nlk->nil", K, S, K)

    # Ensure symmetry
    P_upd = (P_upd + cp.swapaxes(P_upd, -2, -1)) / 2

    # Likelihoods
    mahal_sq = cp.einsum("ni,nij,nj->n", y, S_inv, y)
    sign, logdet = cp.linalg.slogdet(S)
    log_likelihood = -0.5 * (mahal_sq + logdet + m * np.log(2 * np.pi))
    likelihood = cp.exp(log_likelihood)

    return BatchUKFUpdate(
        x=x_upd,
        P=P_upd,
        y=y,
        S=S,
        likelihood=likelihood,
    )


class CuPyUnscentedKalmanFilter:
    """
    GPU-accelerated Unscented Kalman Filter for batch processing.

    Parameters
    ----------
    state_dim : int
        Dimension of state vector.
    meas_dim : int
        Dimension of measurement vector.
    f : callable
        Nonlinear dynamics function.
    h : callable
        Nonlinear measurement function.
    Q : array_like, optional
        Process noise covariance.
    R : array_like, optional
        Measurement noise covariance.
    alpha : float
        Spread of sigma points (default 1e-3).
    beta : float
        Prior knowledge parameter (default 2.0).
    kappa : float
        Secondary scaling (default 0.0).

    Examples
    --------
    >>> import numpy as np
    >>> from pytcl.gpu.ukf import CuPyUnscentedKalmanFilter
    >>>
    >>> def f(x):
    ...     return np.array([x[0] + x[1], x[1]])
    >>>
    >>> def h(x):
    ...     return np.array([np.sqrt(x[0]**2 + x[1]**2)])
    >>>
    >>> ukf = CuPyUnscentedKalmanFilter(
    ...     state_dim=2, meas_dim=1,
    ...     f=f, h=h,
    ... )
    """

    @requires("cupy", extra="gpu", feature="GPU Unscented Kalman filter")
    def __init__(
        self,
        state_dim: int,
        meas_dim: int,
        f: Callable[[NDArray], NDArray],
        h: Callable[[NDArray], NDArray],
        Q: Optional[ArrayLike] = None,
        R: Optional[ArrayLike] = None,
        alpha: float = 1e-3,
        beta: float = 2.0,
        kappa: float = 0.0,
    ):
        cp = import_optional("cupy", extra="gpu", feature="GPU Unscented Kalman filter")

        self.state_dim = state_dim
        self.meas_dim = meas_dim
        self.f = f
        self.h = h
        self.alpha = alpha
        self.beta = beta
        self.kappa = kappa

        if Q is None:
            self.Q = cp.eye(state_dim, dtype=cp.float64) * 0.01
        else:
            self.Q = ensure_gpu_array(Q, dtype=cp.float64)

        if R is None:
            self.R = cp.eye(meas_dim, dtype=cp.float64)
        else:
            self.R = ensure_gpu_array(R, dtype=cp.float64)

    def predict(self, x: ArrayLike, P: ArrayLike) -> BatchUKFPrediction:
        """Perform batch UKF prediction."""
        return batch_ukf_predict(
            x, P, self.f, self.Q, self.alpha, self.beta, self.kappa
        )

    def update(self, x: ArrayLike, P: ArrayLike, z: ArrayLike) -> BatchUKFUpdate:
        """Perform batch UKF update."""
        return batch_ukf_update(
            x, P, z, self.h, self.R, self.alpha, self.beta, self.kappa
        )

    def predict_update(
        self, x: ArrayLike, P: ArrayLike, z: ArrayLike
    ) -> BatchUKFUpdate:
        """Combined prediction and update."""
        pred = self.predict(x, P)
        return self.update(pred.x, pred.P, z)


__all__ = [
    "BatchUKFPrediction",
    "BatchUKFUpdate",
    "batch_ukf_predict",
    "batch_ukf_update",
    "CuPyUnscentedKalmanFilter",
]
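
The following is a minimal end-to-end usage sketch of the new batch UKF functions, not part of the package itself. It is based on the signatures and docstring shapes shown above and assumes the `gpu` extra (CuPy) is installed with a working CUDA device; the helper functions f_dynamics and h_range and all array values are illustrative only.

import numpy as np

from pytcl.gpu.ukf import batch_ukf_predict, batch_ukf_update
from pytcl.gpu.utils import to_cpu


def f_dynamics(x):
    # Illustrative constant-velocity step with mild damping on the velocity
    return np.array([x[0] + x[1], x[1] * 0.99])


def h_range(x):
    # Illustrative range-only measurement, as in the class docstring example
    return np.array([np.sqrt(x[0] ** 2 + x[1] ** 2)])


n_tracks, state_dim, meas_dim = 4, 2, 1

x = np.random.randn(n_tracks, state_dim)            # (n_tracks, state_dim)
P = np.tile(np.eye(state_dim), (n_tracks, 1, 1))    # (n_tracks, state_dim, state_dim)
Q = 0.01 * np.eye(state_dim)                        # shared process noise
R = 0.1 * np.eye(meas_dim)                          # shared measurement noise
z = np.random.randn(n_tracks, meas_dim)             # one measurement per track

# Predict all tracks in one batch, then update against the measurements
pred = batch_ukf_predict(x, P, f_dynamics, Q)
upd = batch_ukf_update(pred.x, pred.P, z, h_range, R)

# Results live on the GPU; to_cpu brings them back as NumPy arrays
x_updated = to_cpu(upd.x)   # (n_tracks, state_dim)
P_updated = to_cpu(upd.P)   # (n_tracks, state_dim, state_dim)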