nrl-tracker 1.6.0__py3-none-any.whl → 1.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.0.dist-info}/METADATA +13 -10
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.0.dist-info}/RECORD +20 -13
- pytcl/assignment_algorithms/__init__.py +28 -0
- pytcl/assignment_algorithms/nd_assignment.py +378 -0
- pytcl/assignment_algorithms/network_flow.py +361 -0
- pytcl/astronomical/__init__.py +35 -0
- pytcl/astronomical/reference_frames.py +102 -0
- pytcl/astronomical/special_orbits.py +536 -0
- pytcl/atmosphere/__init__.py +11 -0
- pytcl/atmosphere/nrlmsise00.py +808 -0
- pytcl/coordinate_systems/conversions/geodetic.py +248 -5
- pytcl/dynamic_estimation/__init__.py +26 -0
- pytcl/dynamic_estimation/gaussian_sum_filter.py +452 -0
- pytcl/dynamic_estimation/kalman/__init__.py +12 -0
- pytcl/dynamic_estimation/kalman/constrained.py +370 -0
- pytcl/dynamic_estimation/kalman/h_infinity.py +2 -2
- pytcl/dynamic_estimation/rbpf.py +593 -0
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.0.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.0.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.0.dist-info}/top_level.txt +0 -0
pytcl/dynamic_estimation/rbpf.py

@@ -0,0 +1,593 @@
"""Rao-Blackwellized Particle Filter (RBPF).

The RBPF partitions the state into a nonlinear part (handled by particles) and
a linear part (handled by Kalman filters for each particle). This provides
better estimation quality than plain particle filters for systems with both
nonlinear and linear dynamics.

The algorithm:
1. Maintain N particles, each with:
   - Position in nonlinear state space (y)
   - Kalman filter state (x, P) for linear subspace
   - Weight w based on measurement likelihood
2. For each time step:
   - Predict: Propagate nonlinear particles, update KF for each
   - Update: Compute measurement likelihood, adapt weights
   - Resample: When effective sample size is low, draw new particles
   - Merge: Combine nearby particles to reduce variance

References:
- Doucet et al., "On Sequential Monte Carlo Sampling with Adaptive Weights"
  (Doucet & Tadic, 2003)
- Andrieu et al., "Particle Methods for Change Detection, System Identification"
  (IEEE SPM, 2004)
"""

from typing import Callable, NamedTuple

import numpy as np
from numpy.typing import NDArray

from pytcl.dynamic_estimation.kalman.extended import ekf_predict, ekf_update


class RBPFParticle(NamedTuple):
    """Rao-Blackwellized particle with nonlinear and linear components.

    Parameters
    ----------
    y : NDArray
        Nonlinear state component (propagated by particle transition)
    x : NDArray
        Linear state component (estimated by Kalman filter for this particle)
    P : NDArray
        Covariance of linear state component
    w : float
        Particle weight (typically normalized to sum to 1)
    """

    y: NDArray
    x: NDArray
    P: NDArray
    w: float


class RBPFFilter:
    """Rao-Blackwellized Particle Filter.

    Combines particle filtering for nonlinear states with Kalman filtering
    for conditionally-linear states. For a system partitioned as:
    - y: nonlinear state (particles)
    - x: linear state given y (Kalman filter)

    Attributes
    ----------
    particles : list[RBPFParticle]
        Current particles with nonlinear/linear states and weights
    max_particles : int
        Maximum number of particles (default 100)
    resample_threshold : float
        Resample when N_eff < resample_threshold * N (default 0.5)
    merge_threshold : float
        Merge nearby particles when KL divergence < threshold (default 0.5)
    """

    def __init__(
        self,
        max_particles: int = 100,
        resample_threshold: float = 0.5,
        merge_threshold: float = 0.5,
    ):
        """Initialize the RBPF.

        Parameters
        ----------
        max_particles : int
            Maximum number of particles to maintain
        resample_threshold : float
            Resample when N_eff drops below this fraction of the current
            particle count
        merge_threshold : float
            KL divergence threshold for merging particles
        """
        self.particles: list[RBPFParticle] = []
        self.max_particles = max_particles
        self.resample_threshold = resample_threshold
        self.merge_threshold = merge_threshold

    def initialize(
        self,
        y0: NDArray,
        x0: NDArray,
        P0: NDArray,
        num_particles: int = 100,
    ) -> None:
        """Initialize particles.

        Parameters
        ----------
        y0 : NDArray
            Initial nonlinear state (broadcast to all particles)
        x0 : NDArray
            Initial linear state (broadcast to all particles)
        P0 : NDArray
            Initial linear state covariance (same for all particles)
        num_particles : int
            Number of particles to initialize
        """
        self.particles = []
        weight = 1.0 / num_particles

        # Add small noise to particle y values to break ties
        ny = y0.shape[0]

        for i in range(num_particles):
            # Nonlinear component: small perturbation around y0
            y = y0 + np.random.randn(ny) * 1e-6
            # Linear component: same for all particles (improved by update)
            x = x0.copy()
            P = P0.copy()

            particle = RBPFParticle(y=y, x=x, P=P, w=weight)
            self.particles.append(particle)

    def predict(
        self,
        g: Callable[[NDArray], NDArray],
        G: NDArray,
        Qy: NDArray,
        f: Callable[[NDArray, NDArray], NDArray],
        F: NDArray,
        Qx: NDArray,
    ) -> None:
        """Predict step: propagate particles and linear states.

        Parameters
        ----------
        g : callable
            Nonlinear state transition: y[k+1] = g(y[k])
        G : NDArray
            Jacobian of g with respect to y (for covariance propagation)
        Qy : NDArray
            Process noise covariance for nonlinear state
        f : callable
            Linear transition: x[k+1] = f(x[k], y[k])
        F : NDArray
            Jacobian matrix dF/dx (linearized around y)
        Qx : NDArray
            Process noise covariance for linear state
        """
        new_particles = []

        for particle in self.particles:
            # Predict nonlinear component
            y_pred = g(particle.y)
            # Add process noise
            y_pred = y_pred + np.random.multivariate_normal(
                np.zeros(y_pred.shape[0]), Qy
            )

            # Create wrapper for linear dynamics with current y_pred
            def f_wrapper(x):
                return f(x, y_pred)

            # Predict linear component using EKF
            pred = ekf_predict(particle.x, particle.P, f_wrapper, F, Qx)

            new_particle = RBPFParticle(
                y=y_pred,
                x=pred.x,
                P=pred.P,
                w=particle.w,
            )
            new_particles.append(new_particle)

        self.particles = new_particles

    def update(
        self,
        z: NDArray,
        h: Callable[[NDArray, NDArray], NDArray],
        H: NDArray,
        R: NDArray,
    ) -> None:
        """Update step: adapt particle weights based on measurement.

        Parameters
        ----------
        z : NDArray
            Measurement vector
        h : callable
            Measurement function: z = h(x, y)
        H : NDArray
            Jacobian matrix dH/dx (measurement sensitivity)
        R : NDArray
            Measurement noise covariance
        """
        weights = np.zeros(len(self.particles))
        new_particles = []

        for i, particle in enumerate(self.particles):
            # Create wrapper for measurement function with current y
            def h_wrapper(x):
                return h(x, particle.y)

            # Update linear component (Kalman update)
            upd = ekf_update(particle.x, particle.P, z, h_wrapper, H, R)

            # Weight: measurement likelihood from Kalman update
            likelihood = upd.likelihood

            # Unnormalized weight
            weights[i] = particle.w * likelihood

            new_particle = RBPFParticle(
                y=particle.y,
                x=upd.x,
                P=upd.P,
                w=particle.w,  # Will renormalize below
            )
            new_particles.append(new_particle)

        # Normalize weights
        w_sum = np.sum(weights)
        if w_sum > 0:
            weights = weights / w_sum
        else:
            # Uniform weights if all likelihoods are zero
            weights = np.ones(len(self.particles)) / len(self.particles)

        # Update particles with new weights
        self.particles = [
            RBPFParticle(y=p.y, x=p.x, P=p.P, w=w)
            for p, w in zip(new_particles, weights)
        ]

        # Resample if needed
        self._resample_if_needed()

        # Merge if too many particles
        self._merge_particles()

    def estimate(self) -> tuple[NDArray, NDArray, NDArray]:
        """Estimate state as weighted mean and covariance.

        Returns
        -------
        y_est : NDArray
            Weighted mean of nonlinear components
        x_est : NDArray
            Weighted mean of linear components
        P_est : NDArray
            Weighted covariance (includes mixture and linear uncertainties)
        """
        if not self.particles:
            raise ValueError("No particles to estimate")

        weights = np.array([p.w for p in self.particles])

        # Nonlinear state: weighted mean
        y_particles = np.array([p.y for p in self.particles])
        y_est = np.average(y_particles, axis=0, weights=weights)

        # Linear state: weighted mean and covariance
        x_particles = np.array([p.x for p in self.particles])
        x_est = np.average(x_particles, axis=0, weights=weights)

        # Covariance: E[(x - x_est)(x - x_est)^T] = E[Cov[x|y]] + Cov[E[x|y]]
        #           = weighted_mean(P) + weighted_cov(x)

        # Weighted mean of covariances
        P_mean = np.zeros((self.particles[0].P.shape[0], self.particles[0].P.shape[1]))
        for p in self.particles:
            P_mean += p.w * p.P

        # Weighted covariance of means
        P_cov = np.zeros((self.particles[0].P.shape[0], self.particles[0].P.shape[1]))
        for p in self.particles:
            dx = p.x - x_est
            P_cov += p.w * np.outer(dx, dx)

        P_est = P_mean + P_cov

        return y_est, x_est, P_est

    def get_particles(self) -> list[RBPFParticle]:
        """Get current particles.

        Returns
        -------
        list[RBPFParticle]
            Current particle list
        """
        return self.particles.copy()

    def _resample_if_needed(self) -> None:
        """Resample particles if effective sample size is too low.

        Uses systematic resampling to reduce variance.
        """
        weights = np.array([p.w for p in self.particles])

        # Effective sample size
        N_eff = 1.0 / np.sum(weights**2)

        threshold = self.resample_threshold * len(self.particles)

        if N_eff < threshold:
            self._systematic_resample()

    def _systematic_resample(self) -> None:
        """Perform systematic resampling."""
        weights = np.array([p.w for p in self.particles])
        n = len(self.particles)

        # Cumulative sum
        cs = np.cumsum(weights)

        # Resample indices
        indices = []
        u = np.random.uniform(0, 1.0 / n)

        j = 0
        for i in range(n):
            while u > cs[j]:
                j += 1
            indices.append(j)
            u += 1.0 / n

        # Create new particles with uniform weights
        new_particles = []
        weight = 1.0 / n

        for idx in indices:
            p = self.particles[idx]
            new_particles.append(
                RBPFParticle(y=p.y.copy(), x=p.x.copy(), P=p.P.copy(), w=weight)
            )

        self.particles = new_particles

    def _merge_particles(self) -> None:
        """Merge nearby particles to reduce variance."""
        if len(self.particles) <= 1:
            return

        # Find closest pair by KL divergence
        max_iter = len(self.particles) - self.max_particles

        for _ in range(max_iter):
            if len(self.particles) <= self.max_particles:
                break

            best_div = np.inf
            best_i, best_j = 0, 1

            # Find closest pair
            for i in range(len(self.particles)):
                for j in range(i + 1, len(self.particles)):
                    div = self._kl_divergence(
                        self.particles[i].P,
                        self.particles[j].P,
                        self.particles[i].x,
                        self.particles[j].x,
                    )
                    if div < best_div:
                        best_div = div
                        best_i, best_j = i, j

            if best_div < self.merge_threshold:
                # Merge particles i and j
                p_i = self.particles[best_i]
                p_j = self.particles[best_j]

                # Weighted merge
                w_total = p_i.w + p_j.w
                w_i = p_i.w / w_total
                w_j = p_j.w / w_total

                # Merged nonlinear state
                y_merged = w_i * p_i.y + w_j * p_j.y

                # Merged linear state and covariance
                x_merged = w_i * p_i.x + w_j * p_j.x

                # Merged covariance
                P_merged = (
                    w_i * p_i.P
                    + w_j * p_j.P
                    + w_i * np.outer(p_i.x - x_merged, p_i.x - x_merged)
                    + w_j * np.outer(p_j.x - x_merged, p_j.x - x_merged)
                )

                merged_particle = RBPFParticle(
                    y=y_merged, x=x_merged, P=P_merged, w=w_total
                )

                # Replace particles
                if best_i < best_j:
                    self.particles[best_i] = merged_particle
                    self.particles.pop(best_j)
                else:
                    self.particles[best_j] = merged_particle
                    self.particles.pop(best_i)
            else:
                break

        # Renormalize weights
        w_sum = sum(p.w for p in self.particles)
        if w_sum > 0:
            self.particles = [
                RBPFParticle(y=p.y, x=p.x, P=p.P, w=p.w / w_sum)
                for p in self.particles
            ]

    @staticmethod
    def _kl_divergence(
        P1: NDArray, P2: NDArray, x1: NDArray, x2: NDArray
    ) -> float:
        """Compute KL divergence between two Gaussians.

        KL(N(x1, P1) || N(x2, P2)) = 0.5 * [
            trace(P2^-1 @ P1) + (x2 - x1)^T @ P2^-1 @ (x2 - x1) - n + ln(|P2|/|P1|)
        ]

        Parameters
        ----------
        P1 : NDArray
            Covariance of first Gaussian
        P2 : NDArray
            Covariance of second Gaussian
        x1 : NDArray
            Mean of first Gaussian
        x2 : NDArray
            Mean of second Gaussian

        Returns
        -------
        float
            KL divergence (always >= 0)
        """
        try:
            P2_inv = np.linalg.inv(P2)
            n = P1.shape[0]

            # Trace term
            trace_term = np.trace(P2_inv @ P1)

            # Mean difference term
            dx = x2 - x1
            mean_term = dx @ P2_inv @ dx

            # Determinant term
            det_term = np.linalg.slogdet(P2)[1] - np.linalg.slogdet(P1)[1]

            kl = 0.5 * (trace_term + mean_term - n + det_term)
            return float(np.maximum(kl, 0.0))  # Ensure non-negative
        except (np.linalg.LinAlgError, ValueError):
            return np.inf


# Convenience functions for functional interface


def rbpf_predict(
    particles: list[RBPFParticle],
    g: Callable[[NDArray], NDArray],
    G: NDArray,
    Qy: NDArray,
    f: Callable[[NDArray, NDArray], NDArray],
    F: NDArray,
    Qx: NDArray,
) -> list[RBPFParticle]:
    """Predict step for RBPF particles.

    Parameters
    ----------
    particles : list[RBPFParticle]
        Current particles
    g : callable
        Nonlinear state transition
    G : NDArray
        Jacobian of nonlinear transition
    Qy : NDArray
        Process noise covariance for nonlinear state
    f : callable
        Linear state transition
    F : NDArray
        Jacobian of linear transition
    Qx : NDArray
        Process noise covariance for linear state

    Returns
    -------
    list[RBPFParticle]
        Predicted particles
    """
    new_particles = []

    for particle in particles:
        # Predict nonlinear component
        y_pred = g(particle.y)
        y_pred = y_pred + np.random.multivariate_normal(
            np.zeros(y_pred.shape[0]), Qy
        )

        # Create wrapper for linear dynamics with current y_pred
        def f_wrapper(x):
            return f(x, y_pred)

        # Predict linear component
        pred = ekf_predict(particle.x, particle.P, f_wrapper, F, Qx)

        new_particle = RBPFParticle(
            y=y_pred,
            x=pred.x,
            P=pred.P,
            w=particle.w,
        )
        new_particles.append(new_particle)

    return new_particles


def rbpf_update(
    particles: list[RBPFParticle],
    z: NDArray,
    h: Callable[[NDArray, NDArray], NDArray],
    H: NDArray,
    R: NDArray,
) -> list[RBPFParticle]:
    """Update step for RBPF particles.

    Parameters
    ----------
    particles : list[RBPFParticle]
        Predicted particles
    z : NDArray
        Measurement
    h : callable
        Measurement function
    H : NDArray
        Jacobian of measurement function
    R : NDArray
        Measurement noise covariance

    Returns
    -------
    list[RBPFParticle]
        Updated particles with adapted weights
    """
    weights = np.zeros(len(particles))
    new_particles = []

    for i, particle in enumerate(particles):
        # Create wrapper for measurement function with current y
        def h_wrapper(x):
            return h(x, particle.y)

        # Update linear component
        upd = ekf_update(particle.x, particle.P, z, h_wrapper, H, R)

        # Weight by measurement likelihood
        weights[i] = particle.w * upd.likelihood

        new_particle = RBPFParticle(
            y=particle.y,
            x=upd.x,
            P=upd.P,
            w=particle.w,
        )
        new_particles.append(new_particle)

    # Normalize weights
    w_sum = np.sum(weights)
    if w_sum > 0:
        weights = weights / w_sum
    else:
        weights = np.ones(len(particles)) / len(particles)

    # Update with new weights
    return [
        RBPFParticle(y=p.y, x=p.x, P=p.P, w=w)
        for p, w in zip(new_particles, weights)
    ]
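
The listing above is the complete new module. For orientation only, the sketch below (editor's illustration, not code shipped in the package) shows how the RBPFFilter API documented above could be driven on a toy scalar model. It assumes nrl-tracker 1.7.0 is installed and that ekf_predict/ekf_update behave as the listing implies; the model functions, Jacobians, and noise values are illustrative assumptions.

# Illustrative usage sketch -- not part of nrl-tracker; toy model chosen for brevity.
import numpy as np

from pytcl.dynamic_estimation.rbpf import RBPFFilter


def g(y):
    # Nonlinear transition for the particle state y
    return np.sin(y)


def f(x, y):
    # Linear transition for x, conditioned on the particle state y
    return 0.9 * x


def h(x, y):
    # Measurement couples the linear and nonlinear parts
    return x + y


G = np.array([[1.0]])    # placeholder: the predict() shown above does not use it
F = np.array([[0.9]])    # Jacobian of f with respect to x
H = np.array([[1.0]])    # Jacobian of h with respect to x
Qy = np.array([[0.01]])  # process noise, nonlinear state
Qx = np.array([[0.01]])  # process noise, linear state
R = np.array([[0.1]])    # measurement noise

rbpf = RBPFFilter(max_particles=100, resample_threshold=0.5)
rbpf.initialize(y0=np.zeros(1), x0=np.zeros(1), P0=np.eye(1), num_particles=100)

rng = np.random.default_rng(0)
for _ in range(50):
    z = rng.normal(0.0, 0.5, size=1)  # synthetic measurement for the sketch
    rbpf.predict(g, G, Qy, f, F, Qx)
    rbpf.update(z, h, H, R)
    y_est, x_est, P_est = rbpf.estimate()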