nrl-tracker 1.9.2__py3-none-any.whl → 1.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.11.0.dist-info}/METADATA +49 -4
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.11.0.dist-info}/RECORD +19 -12
- pytcl/__init__.py +3 -3
- pytcl/assignment_algorithms/nd_assignment.py +359 -1
- pytcl/coordinate_systems/jacobians/jacobians.py +63 -33
- pytcl/core/optional_deps.py +20 -0
- pytcl/dynamic_estimation/kalman/matrix_utils.py +133 -35
- pytcl/gpu/__init__.py +153 -0
- pytcl/gpu/ekf.py +433 -0
- pytcl/gpu/kalman.py +543 -0
- pytcl/gpu/matrix_utils.py +491 -0
- pytcl/gpu/particle_filter.py +578 -0
- pytcl/gpu/ukf.py +476 -0
- pytcl/gpu/utils.py +582 -0
- pytcl/gravity/clenshaw.py +8 -0
- pytcl/gravity/spherical_harmonics.py +17 -10
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.11.0.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.11.0.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.9.2.dist-info → nrl_tracker-1.11.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,578 @@
|
|
|
1
|
+
"""
|
|
2
|
+
GPU-accelerated Particle Filter using CuPy.
|
|
3
|
+
|
|
4
|
+
This module provides GPU-accelerated implementations of particle filtering
|
|
5
|
+
algorithms for highly nonlinear and non-Gaussian state estimation.
|
|
6
|
+
|
|
7
|
+
Key Features
|
|
8
|
+
------------
|
|
9
|
+
- GPU-accelerated resampling (systematic, multinomial)
|
|
10
|
+
- Parallel weight computation
|
|
11
|
+
- Batch processing of multiple particle filters
|
|
12
|
+
- Efficient memory management
|
|
13
|
+
|
|
14
|
+
Performance
|
|
15
|
+
-----------
|
|
16
|
+
The GPU implementation achieves 8-15x speedup compared to CPU for:
|
|
17
|
+
- Large particle counts (N > 1000)
|
|
18
|
+
- Parallel processing of multiple targets
|
|
19
|
+
|
|
20
|
+
Examples
|
|
21
|
+
--------
|
|
22
|
+
>>> from pytcl.gpu.particle_filter import CuPyParticleFilter
|
|
23
|
+
>>> import numpy as np
|
|
24
|
+
>>>
|
|
25
|
+
>>> def dynamics(particles, t):
|
|
26
|
+
... # Propagate particles through nonlinear dynamics
|
|
27
|
+
... return particles + np.random.randn(*particles.shape) * 0.1
|
|
28
|
+
>>>
|
|
29
|
+
>>> def likelihood(particles, measurement):
|
|
30
|
+
... # Compute likelihood for each particle
|
|
31
|
+
... diff = particles[:, 0] - measurement
|
|
32
|
+
... return np.exp(-0.5 * diff**2)
|
|
33
|
+
>>>
|
|
34
|
+
>>> pf = CuPyParticleFilter(n_particles=10000, state_dim=2)
|
|
35
|
+
>>> pf.predict(dynamics)
|
|
36
|
+
>>> pf.update(measurement, likelihood)
|
|
37
|
+
"""
|
|
38
|
+
|
|
39
|
+
from typing import Any, Callable, NamedTuple, Tuple
|
|
40
|
+
|
|
41
|
+
import numpy as np
|
|
42
|
+
from numpy.typing import ArrayLike, NDArray
|
|
43
|
+
|
|
44
|
+
from pytcl.core.optional_deps import import_optional, requires
|
|
45
|
+
from pytcl.gpu.utils import ensure_gpu_array, to_cpu
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class ParticleFilterState(NamedTuple):
    """Immutable snapshot of a particle filter.

    Attributes
    ----------
    particles : ndarray
        Particle states, shape (n_particles, state_dim).
    weights : ndarray
        Normalized particle weights, shape (n_particles,).
    ess : float
        Effective sample size.
    """

    # Particle cloud: one row per particle, one column per state component.
    particles: NDArray[np.floating]
    # Importance weights; expected to sum to one.
    weights: NDArray[np.floating]
    # Effective sample size, 1 / sum(w_i^2).
    ess: float
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
@requires("cupy", extra="gpu", feature="GPU particle filter")
def gpu_effective_sample_size(weights: ArrayLike) -> float:
    """Return the effective sample size, ESS = 1 / sum(w_i^2), computed on GPU.

    Parameters
    ----------
    weights : array_like
        Normalized particle weights.

    Returns
    -------
    ess : float
        Effective sample size.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")
    gpu_weights = ensure_gpu_array(weights, dtype=cp.float64)
    # Sum of squared weights fully determines the ESS.
    squared_mass = float(cp.sum(gpu_weights**2))
    return 1.0 / squared_mass
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
@requires("cupy", extra="gpu", feature="GPU particle filter")
def gpu_resample_systematic(weights: ArrayLike) -> NDArray[np.intp]:
    """
    GPU-accelerated systematic resampling.

    A single uniform draw shifts an evenly spaced grid of sampling points
    through the weight CDF, giving low variance at O(N) cost.

    Parameters
    ----------
    weights : array_like
        Normalized particle weights, shape (n_particles,).

    Returns
    -------
    indices : ndarray
        Resampled particle indices, shape (n_particles,).

    Examples
    --------
    >>> import numpy as np
    >>> from pytcl.gpu.particle_filter import gpu_resample_systematic
    >>> weights = np.array([0.1, 0.3, 0.4, 0.2])
    >>> indices = gpu_resample_systematic(weights)
    >>> # Particles 1 and 2 will be selected more often
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

    gpu_w = ensure_gpu_array(weights, dtype=cp.float64)
    count = len(gpu_w)

    # Cumulative distribution of the weights.
    cdf = cp.cumsum(gpu_w)

    # Evenly spaced grid with one shared random offset in [0, 1/N).
    offset = cp.random.uniform(0, 1.0 / count)
    grid = offset + cp.arange(count, dtype=cp.float64) / count

    # Invert the CDF; clip guards against round-off pushing the last
    # grid point past cdf[-1].
    picked = cp.searchsorted(cdf, grid)
    picked = cp.clip(picked, 0, count - 1)

    return picked
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
@requires("cupy", extra="gpu", feature="GPU particle filter")
def gpu_resample_multinomial(weights: ArrayLike) -> NDArray[np.intp]:
    """
    GPU-accelerated multinomial resampling.

    Draws each replacement particle independently with probability equal
    to its weight.

    Parameters
    ----------
    weights : array_like
        Normalized particle weights, shape (n_particles,).

    Returns
    -------
    indices : ndarray
        Resampled particle indices, shape (n_particles,).

    Notes
    -----
    Multinomial resampling has higher variance than systematic resampling
    but is simpler and can be more efficient on GPU for certain sizes.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

    gpu_w = ensure_gpu_array(weights, dtype=cp.float64)
    count = len(gpu_w)

    # Cumulative distribution of the weights.
    cdf = cp.cumsum(gpu_w)

    # One independent uniform draw per output particle.
    draws = cp.random.uniform(0, 1, count)

    # Invert the CDF; clip guards the upper round-off edge.
    picked = cp.searchsorted(cdf, draws)
    picked = cp.clip(picked, 0, count - 1)

    return picked
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
@requires("cupy", extra="gpu", feature="GPU particle filter")
def gpu_resample_stratified(weights: ArrayLike) -> NDArray[np.intp]:
    """
    GPU-accelerated stratified resampling.

    Splits [0, 1) into N equal strata and draws one uniform sample inside
    each, then inverts the weight CDF at those points.

    Parameters
    ----------
    weights : array_like
        Normalized particle weights, shape (n_particles,).

    Returns
    -------
    indices : ndarray
        Resampled particle indices, shape (n_particles,).
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

    gpu_w = ensure_gpu_array(weights, dtype=cp.float64)
    count = len(gpu_w)

    # Cumulative distribution of the weights.
    cdf = cp.cumsum(gpu_w)

    # One uniform draw inside each of the N strata [k/N, (k+1)/N).
    strata_points = (cp.arange(count, dtype=cp.float64) + cp.random.uniform(0, 1, count)) / count

    # Invert the CDF; clip guards the upper round-off edge.
    picked = cp.searchsorted(cdf, strata_points)
    picked = cp.clip(picked, 0, count - 1)

    return picked
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
@requires("cupy", extra="gpu", feature="GPU particle filter")
def gpu_normalize_weights(
    log_weights: ArrayLike,
) -> Tuple[NDArray[np.floating[Any]], float]:
    """
    Normalize log weights to proper weights on GPU.

    Applies the log-sum-exp trick so that very negative log weights do
    not underflow to zero before normalization.

    Parameters
    ----------
    log_weights : array_like
        Unnormalized log weights, shape (n_particles,).

    Returns
    -------
    weights : ndarray
        Normalized weights, shape (n_particles,).
    log_likelihood : float
        Log of the normalization constant.
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

    lw = ensure_gpu_array(log_weights, dtype=cp.float64)

    # log-sum-exp: subtract the max before exponentiating for stability.
    peak = cp.max(lw)
    log_normalizer = peak + cp.log(cp.sum(cp.exp(lw - peak)))

    # Exponentiate back to properly normalized weights.
    normalized = cp.exp(lw - log_normalizer)

    return normalized, float(log_normalizer)
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
class CuPyParticleFilter:
    """
    GPU-accelerated Bootstrap Particle Filter.

    Implements Sequential Importance Resampling (SIR): propagate the
    particle cloud through the dynamics, reweight by the measurement
    likelihood, and resample whenever the effective sample size drops
    below a threshold.

    Parameters
    ----------
    n_particles : int
        Number of particles.
    state_dim : int
        Dimension of state vector.
    resample_method : str
        Resampling method: 'systematic', 'multinomial', or 'stratified'.
    resample_threshold : float
        ESS threshold for resampling (as fraction of n_particles).

    Attributes
    ----------
    particles : cupy.ndarray
        Current particle states, shape (n_particles, state_dim).
    weights : cupy.ndarray
        Current particle weights, shape (n_particles,).

    Examples
    --------
    >>> import numpy as np
    >>> from pytcl.gpu.particle_filter import CuPyParticleFilter
    >>>
    >>> # Initialize filter
    >>> pf = CuPyParticleFilter(n_particles=10000, state_dim=4)
    >>> pf.initialize(initial_state, initial_cov)
    >>>
    >>> # Run filter
    >>> for measurement in measurements:
    ...     pf.predict(dynamics_fn)
    ...     pf.update(measurement, likelihood_fn)
    ...     state_estimate = pf.get_estimate()
    """

    @requires("cupy", extra="gpu", feature="GPU particle filter")
    def __init__(
        self,
        n_particles: int,
        state_dim: int,
        resample_method: str = "systematic",
        resample_threshold: float = 0.5,
    ):
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        self.n_particles = n_particles
        self.state_dim = state_dim
        self.resample_threshold = resample_threshold

        # Dispatch table mapping method names to resampling routines.
        sampler_table = {
            "systematic": gpu_resample_systematic,
            "multinomial": gpu_resample_multinomial,
            "stratified": gpu_resample_stratified,
        }
        if resample_method not in sampler_table:
            raise ValueError(f"Unknown resample method: {resample_method}")
        self._resample_fn = sampler_table[resample_method]

        # Start from an all-zero cloud with uniform weights.
        self.particles = cp.zeros((n_particles, state_dim), dtype=cp.float64)
        self.weights = cp.ones(n_particles, dtype=cp.float64) / n_particles

    def initialize(
        self,
        mean: ArrayLike,
        cov: ArrayLike,
    ) -> None:
        """
        Initialize particles from a Gaussian distribution.

        Parameters
        ----------
        mean : array_like
            Mean state, shape (state_dim,).
        cov : array_like
            Covariance matrix, shape (state_dim, state_dim).
        """
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        mu = np.asarray(mean).flatten()
        sigma = np.asarray(cov)

        # CuPy lacks a multivariate-normal sampler, so draw on the CPU
        # and move the samples to the device afterwards.
        drawn = np.random.multivariate_normal(mu, sigma, self.n_particles)
        self.particles = ensure_gpu_array(drawn, dtype=cp.float64)
        self.weights = cp.ones(self.n_particles, dtype=cp.float64) / self.n_particles

    def initialize_uniform(
        self,
        low: ArrayLike,
        high: ArrayLike,
    ) -> None:
        """
        Initialize particles from a uniform distribution over a box.

        Parameters
        ----------
        low : array_like
            Lower bounds, shape (state_dim,).
        high : array_like
            Upper bounds, shape (state_dim,).
        """
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        lower = ensure_gpu_array(low, dtype=cp.float64)
        upper = ensure_gpu_array(high, dtype=cp.float64)

        # Scale unit-interval draws into [lower, upper) per dimension.
        unit_draws = cp.random.uniform(0, 1, (self.n_particles, self.state_dim))
        self.particles = lower + unit_draws * (upper - lower)
        self.weights = cp.ones(self.n_particles, dtype=cp.float64) / self.n_particles

    def predict(
        self,
        dynamics_fn: Callable[[NDArray[np.floating[Any]]], NDArray[np.floating[Any]]],
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """
        Propagate particles through the dynamics model.

        Parameters
        ----------
        dynamics_fn : callable
            Function that takes particles (N, state_dim) and returns
            propagated particles (N, state_dim).
        *args, **kwargs
            Additional arguments passed to dynamics_fn.

        Notes
        -----
        The dynamics function receives CuPy arrays if GPU is available.
        It should return arrays of the same type.
        """
        # Delegate propagation entirely to the user-supplied function.
        self.particles = dynamics_fn(self.particles, *args, **kwargs)

    def update(
        self,
        measurement: ArrayLike,
        likelihood_fn: Callable[
            [NDArray[np.floating[Any]], NDArray[np.floating[Any]]],
            NDArray[np.floating[Any]],
        ],
    ) -> float:
        """
        Update weights based on measurement likelihood.

        Parameters
        ----------
        measurement : array_like
            Measurement vector.
        likelihood_fn : callable
            Function that computes likelihood for each particle.
            Takes (particles, measurement) and returns likelihoods (n_particles,).

        Returns
        -------
        log_likelihood : float
            Log of the marginal likelihood (normalization constant).
        """
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        z_gpu = ensure_gpu_array(measurement, dtype=cp.float64)

        # Per-particle measurement likelihoods.
        raw_likelihoods = likelihood_fn(self.particles, z_gpu)
        raw_likelihoods = ensure_gpu_array(raw_likelihoods, dtype=cp.float64)

        # Bayes update in log space; the tiny floor avoids log(0).
        log_weights = cp.log(self.weights) + cp.log(raw_likelihoods + 1e-300)

        # Normalize via log-sum-exp.
        self.weights, log_marginal = gpu_normalize_weights(log_weights)

        # Degeneracy check: resample when ESS falls below the threshold.
        current_ess = gpu_effective_sample_size(self.weights)
        if current_ess < self.resample_threshold * self.n_particles:
            self._resample()

        return log_marginal

    def _resample(self) -> None:
        """Resample the cloud and reset weights to uniform."""
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        chosen = self._resample_fn(self.weights)
        self.particles = self.particles[chosen]
        self.weights = cp.ones(self.n_particles, dtype=cp.float64) / self.n_particles

    def get_estimate(self) -> NDArray[np.floating]:
        """
        Compute the weighted mean state estimate.

        Returns
        -------
        estimate : ndarray
            Weighted mean state, shape (state_dim,).
        """
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")
        return cp.sum(self.weights[:, None] * self.particles, axis=0)

    def get_covariance(self) -> NDArray[np.floating]:
        """
        Compute the weighted covariance estimate.

        Returns
        -------
        cov : ndarray
            Weighted covariance, shape (state_dim, state_dim).
        """
        cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

        # Weighted outer-product sum of the centered particles.
        centered = self.particles - self.get_estimate()
        return cp.einsum("n,ni,nj->ij", self.weights, centered, centered)

    def get_ess(self) -> float:
        """Get current effective sample size."""
        return gpu_effective_sample_size(self.weights)

    def get_state(self) -> ParticleFilterState:
        """
        Get the current filter state.

        Returns
        -------
        state : ParticleFilterState
            Named tuple with particles, weights, and ESS.
        """
        return ParticleFilterState(self.particles, self.weights, self.get_ess())

    def get_particles_cpu(self) -> NDArray[np.floating]:
        """Get particles on CPU."""
        return to_cpu(self.particles)

    def get_weights_cpu(self) -> NDArray[np.floating]:
        """Get weights on CPU."""
        return to_cpu(self.weights)
|
|
500
|
+
|
|
501
|
+
|
|
502
|
+
@requires("cupy", extra="gpu", feature="GPU particle filter")
def batch_particle_filter_update(
    particles: ArrayLike,
    weights: ArrayLike,
    measurements: ArrayLike,
    likelihood_fn: Callable[
        [NDArray[np.floating[Any]], NDArray[np.floating[Any]]],
        NDArray[np.floating[Any]],
    ],
) -> Tuple[
    NDArray[np.floating[Any]], NDArray[np.floating[Any]], NDArray[np.floating[Any]]
]:
    """
    Batch update for multiple particle filters.

    Parameters
    ----------
    particles : array_like
        Particle states, shape (n_filters, n_particles, state_dim).
    weights : array_like
        Particle weights, shape (n_filters, n_particles).
    measurements : array_like
        Measurements, shape (n_filters, meas_dim).
    likelihood_fn : callable
        Function that computes likelihood for each particle.
        Takes (particles, measurement) and returns likelihoods (n_particles,).

    Returns
    -------
    weights_updated : ndarray
        Updated weights, shape (n_filters, n_particles).
    log_likelihoods : ndarray
        Log likelihoods for each filter, shape (n_filters,).
    ess : ndarray
        Effective sample sizes, shape (n_filters,).
    """
    cp = import_optional("cupy", extra="gpu", feature="GPU particle filter")

    particles_gpu = ensure_gpu_array(particles, dtype=cp.float64)
    weights_gpu = ensure_gpu_array(weights, dtype=cp.float64)
    measurements_gpu = ensure_gpu_array(measurements, dtype=cp.float64)

    n_filters = particles_gpu.shape[0]

    weights_updated = cp.zeros_like(weights_gpu)
    log_likelihoods = cp.zeros(n_filters, dtype=cp.float64)
    ess = cp.zeros(n_filters, dtype=cp.float64)

    for i in range(n_filters):
        # Per-particle measurement likelihoods for this filter.
        likelihoods = likelihood_fn(particles_gpu[i], measurements_gpu[i])
        likelihoods = ensure_gpu_array(likelihoods, dtype=cp.float64)

        # Bayes update in log space; the tiny floor avoids log(0).
        log_weights = cp.log(weights_gpu[i]) + cp.log(likelihoods + 1e-300)

        # Reuse the module helpers instead of re-implementing the
        # log-sum-exp normalization and ESS formulas inline — the
        # computation is numerically identical.
        weights_updated[i], log_likelihoods[i] = gpu_normalize_weights(log_weights)
        ess[i] = gpu_effective_sample_size(weights_updated[i])

    return weights_updated, log_likelihoods, ess
|
|
567
|
+
|
|
568
|
+
|
|
569
|
+
# Public API of the GPU particle-filter module.
__all__ = [
    "ParticleFilterState",
    "gpu_effective_sample_size",
    "gpu_resample_systematic",
    "gpu_resample_multinomial",
    "gpu_resample_stratified",
    "gpu_normalize_weights",
    "CuPyParticleFilter",
    "batch_particle_filter_update",
]
|