nrl-tracker 1.6.0__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,452 @@
1
+ """Gaussian Sum Filter (GSF) for nonlinear state estimation.
2
+
3
+ The Gaussian Sum Filter represents the posterior distribution as a weighted
4
+ mixture of Gaussians. This allows approximation of multi-modal distributions
5
+ and nonlinear systems better than a single EKF.
6
+
7
+ Each component has:
8
+ - Weight (w_i): Probability of that Gaussian
9
+ - Mean (μ_i): Component state estimate
10
+ - Covariance (P_i): Component uncertainty
11
+
12
+ The filter performs:
13
+ 1. Predict: Propagate each component independently
14
+ 2. Update: Update each component with measurement, adapt weights
15
+ 3. Manage: Prune low-weight components, merge similar ones
16
+
17
+ References
18
+ ----------
19
+ Bar-Shalom, Y., Li, X. R., & Kirubarajan, T. (2001). Estimation with
20
+ Applications to Tracking and Navigation. Wiley-Interscience.
21
+ """
22
+
23
+ from typing import Callable, List, NamedTuple
24
+
25
+ import numpy as np
26
+ from numpy.typing import ArrayLike, NDArray
27
+
28
+ from pytcl.dynamic_estimation.kalman.extended import ekf_predict, ekf_update
29
+
30
+
31
class GaussianComponent(NamedTuple):
    """One weighted Gaussian term of a mixture distribution.

    Fields
    ------
    x : NDArray
        Component state estimate (mean vector).
    P : NDArray
        Component covariance matrix.
    w : float
        Mixture weight (probability mass of this component).
    """

    x: NDArray
    P: NDArray
    w: float
37
+
38
+
39
+ class GaussianSumFilter:
40
+ """Gaussian Sum Filter for nonlinear state estimation.
41
+
42
+ A mixture model approach that represents the posterior distribution
43
+ as a weighted sum of Gaussians. Useful for multi-modal distributions
44
+ and nonlinear systems.
45
+
46
+ Attributes
47
+ ----------
48
+ components : list[GaussianComponent]
49
+ Current mixture components (state, covariance, weight).
50
+ max_components : int
51
+ Maximum components to maintain (via pruning/merging).
52
+ merge_threshold : float
53
+ KL divergence threshold for merging components.
54
+ prune_threshold : float
55
+ Weight threshold below which to prune components.
56
+ """
57
+
58
+ def __init__(
59
+ self,
60
+ max_components: int = 5,
61
+ merge_threshold: float = 0.01,
62
+ prune_threshold: float = 1e-3,
63
+ ):
64
+ """Initialize Gaussian Sum Filter.
65
+
66
+ Parameters
67
+ ----------
68
+ max_components : int
69
+ Maximum number of Gaussian components to maintain.
70
+ merge_threshold : float
71
+ KL divergence threshold for merging. Components with KL
72
+ divergence below this are merged.
73
+ prune_threshold : float
74
+ Weight threshold for pruning. Components with weight below
75
+ this are removed.
76
+ """
77
+ self.components: List[GaussianComponent] = []
78
+ self.max_components = max_components
79
+ self.merge_threshold = merge_threshold
80
+ self.prune_threshold = prune_threshold
81
+
82
+ def initialize(
83
+ self,
84
+ x0: ArrayLike,
85
+ P0: ArrayLike,
86
+ num_components: int = 1,
87
+ ) -> None:
88
+ """Initialize filter with initial state.
89
+
90
+ Parameters
91
+ ----------
92
+ x0 : array_like
93
+ Initial state estimate, shape (n,).
94
+ P0 : array_like
95
+ Initial covariance, shape (n, n).
96
+ num_components : int
97
+ Number of components to initialize with. If > 1, will
98
+ create multiple components with slightly perturbed means.
99
+ """
100
+ x0 = np.asarray(x0, dtype=np.float64)
101
+ P0 = np.asarray(P0, dtype=np.float64)
102
+
103
+ self.components = []
104
+ weight = 1.0 / num_components
105
+
106
+ for i in range(num_components):
107
+ if i == 0:
108
+ x = x0.copy()
109
+ else:
110
+ # Slight perturbation for diversity
111
+ x = x0 + np.random.randn(x0.shape[0]) * np.sqrt(
112
+ np.diag(P0)
113
+ ) * 0.1
114
+
115
+ self.components.append(GaussianComponent(x=x, P=P0.copy(), w=weight))
116
+
117
+ def predict(
118
+ self,
119
+ f: Callable,
120
+ F: ArrayLike,
121
+ Q: ArrayLike,
122
+ ) -> None:
123
+ """Predict step: propagate each component.
124
+
125
+ Parameters
126
+ ----------
127
+ f : callable
128
+ Nonlinear state transition function f(x).
129
+ F : array_like
130
+ Jacobian of f, shape (n, n).
131
+ Q : array_like
132
+ Process noise covariance, shape (n, n).
133
+ """
134
+ F = np.asarray(F, dtype=np.float64)
135
+ Q = np.asarray(Q, dtype=np.float64)
136
+
137
+ new_components = []
138
+ for comp in self.components:
139
+ # EKF predict for each component
140
+ pred = ekf_predict(comp.x, comp.P, f, F, Q)
141
+ new_components.append(
142
+ GaussianComponent(x=pred.x, P=pred.P, w=comp.w)
143
+ )
144
+
145
+ self.components = new_components
146
+
147
+ def update(
148
+ self,
149
+ z: ArrayLike,
150
+ h: Callable,
151
+ H: ArrayLike,
152
+ R: ArrayLike,
153
+ ) -> None:
154
+ """Update step: update each component, adapt weights.
155
+
156
+ Parameters
157
+ ----------
158
+ z : array_like
159
+ Measurement, shape (m,).
160
+ h : callable
161
+ Nonlinear measurement function h(x).
162
+ H : array_like
163
+ Jacobian of h, shape (m, n).
164
+ R : array_like
165
+ Measurement noise covariance, shape (m, m).
166
+ """
167
+ z = np.asarray(z, dtype=np.float64)
168
+ H = np.asarray(H, dtype=np.float64)
169
+ R = np.asarray(R, dtype=np.float64)
170
+
171
+ # Update each component and compute likelihoods
172
+ likelihoods = []
173
+ updated_components = []
174
+
175
+ for comp in self.components:
176
+ # EKF update for component
177
+ upd = ekf_update(comp.x, comp.P, z, h, H, R)
178
+
179
+ # Likelihood from this measurement
180
+ likelihood = upd.likelihood
181
+
182
+ updated_components.append(
183
+ GaussianComponent(x=upd.x, P=upd.P, w=comp.w)
184
+ )
185
+ likelihoods.append(likelihood)
186
+
187
+ # Adapt weights based on measurement likelihood
188
+ likelihoods = np.array(likelihoods)
189
+ weights = np.array([c.w for c in updated_components])
190
+
191
+ # Normalize weights by likelihood (Bayesian update)
192
+ weights = weights * likelihoods
193
+ weight_sum = np.sum(weights)
194
+
195
+ if weight_sum > 0:
196
+ weights = weights / weight_sum
197
+ else:
198
+ # Fallback: equal weights if all likelihoods zero
199
+ weights = np.ones(len(updated_components)) / len(updated_components)
200
+
201
+ # Update components with new weights
202
+ self.components = [
203
+ GaussianComponent(x=c.x, P=c.P, w=w)
204
+ for c, w in zip(updated_components, weights)
205
+ ]
206
+
207
+ # Manage components (prune, merge)
208
+ self._prune_components()
209
+ self._merge_components()
210
+
211
+ def _prune_components(self) -> None:
212
+ """Remove components with weight below threshold."""
213
+ self.components = [
214
+ c for c in self.components if c.w >= self.prune_threshold
215
+ ]
216
+
217
+ if len(self.components) == 0:
218
+ # Failsafe: keep best component
219
+ self.components = [
220
+ max(self.components, key=lambda c: c.w)
221
+ ]
222
+
223
+ # Renormalize weights
224
+ total_weight = sum(c.w for c in self.components)
225
+ if total_weight > 0:
226
+ self.components = [
227
+ GaussianComponent(x=c.x, P=c.P, w=c.w / total_weight)
228
+ for c in self.components
229
+ ]
230
+
231
+ def _merge_components(self) -> None:
232
+ """Merge similar components to keep count manageable."""
233
+ while len(self.components) > self.max_components:
234
+ # Find pair with smallest KL divergence
235
+ best_i, best_j, best_kl = 0, 1, float('inf')
236
+
237
+ for i in range(len(self.components)):
238
+ for j in range(i + 1, len(self.components)):
239
+ kl = self._kl_divergence(
240
+ self.components[i], self.components[j]
241
+ )
242
+ if kl < best_kl:
243
+ best_kl = kl
244
+ best_i = best_j = i
245
+ best_j = j
246
+
247
+ if best_kl < self.merge_threshold:
248
+ # Merge components i and j
249
+ ci = self.components[best_i]
250
+ cj = self.components[best_j]
251
+
252
+ # Merged weight
253
+ w_new = ci.w + cj.w
254
+
255
+ # Merged mean (weighted average)
256
+ x_new = (ci.w * ci.x + cj.w * cj.x) / w_new
257
+
258
+ # Merged covariance (weighted average of covariances
259
+ # plus covariance of means)
260
+ P_new = (ci.w * ci.P + cj.w * cj.P) / w_new
261
+ dx_i = ci.x - x_new
262
+ dx_j = cj.x - x_new
263
+ P_new += (
264
+ ci.w * np.outer(dx_i, dx_i)
265
+ + cj.w * np.outer(dx_j, dx_j)
266
+ ) / w_new
267
+
268
+ # Create merged component
269
+ merged = GaussianComponent(x=x_new, P=P_new, w=w_new)
270
+
271
+ # Replace with merged, remove old
272
+ self.components = [
273
+ c for i, c in enumerate(self.components)
274
+ if i != best_i and i != best_j
275
+ ]
276
+ self.components.append(merged)
277
+ else:
278
+ # Can't merge more, stop
279
+ break
280
+
281
+ @staticmethod
282
+ def _kl_divergence(c1: GaussianComponent, c2: GaussianComponent) -> float:
283
+ """Compute KL divergence between two Gaussians.
284
+
285
+ KL(N1 || N2) = 0.5 * [tr(P2^{-1}P1) + (μ2-μ1)^T P2^{-1}
286
+ (μ2-μ1) - n + ln|P2|/|P1|]
287
+
288
+ Parameters
289
+ ----------
290
+ c1, c2 : GaussianComponent
291
+ Gaussian components.
292
+
293
+ Returns
294
+ -------
295
+ kl : float
296
+ KL divergence from c1 to c2.
297
+ """
298
+ dx = c2.x - c1.x
299
+ n = len(c1.x)
300
+
301
+ try:
302
+ P2_inv = np.linalg.inv(c2.P)
303
+ logdet_ratio = np.linalg.slogdet(c2.P)[1] - np.linalg.slogdet(
304
+ c1.P
305
+ )[1]
306
+
307
+ trace_term = np.trace(P2_inv @ c1.P)
308
+ quad_term = dx @ P2_inv @ dx
309
+ kl = 0.5 * (trace_term + quad_term - n + logdet_ratio)
310
+
311
+ return float(np.clip(kl, 0, np.inf))
312
+ except np.linalg.LinAlgError:
313
+ # Singular matrix, return large KL
314
+ return 1e6
315
+
316
+ def estimate(self) -> tuple[NDArray, NDArray]:
317
+ """Get overall state estimate (weighted mean and covariance).
318
+
319
+ Returns
320
+ -------
321
+ x : ndarray
322
+ Weighted mean of components.
323
+ P : ndarray
324
+ Weighted covariance of components.
325
+ """
326
+ if not self.components:
327
+ raise ValueError("No components initialized")
328
+
329
+ # Weighted mean
330
+ x_est = np.zeros_like(self.components[0].x)
331
+ for comp in self.components:
332
+ x_est += comp.w * comp.x
333
+
334
+ # Weighted covariance
335
+ P_est = np.zeros_like(self.components[0].P)
336
+ for comp in self.components:
337
+ dx = comp.x - x_est
338
+ P_est += comp.w * (comp.P + np.outer(dx, dx))
339
+
340
+ return x_est, P_est
341
+
342
+ def get_components(self) -> List[GaussianComponent]:
343
+ """Get current mixture components.
344
+
345
+ Returns
346
+ -------
347
+ components : list[GaussianComponent]
348
+ List of components with (x, P, w).
349
+ """
350
+ return self.components.copy()
351
+
352
+ def get_num_components(self) -> int:
353
+ """Get number of current components."""
354
+ return len(self.components)
355
+
356
+
357
+ def gaussian_sum_filter_predict(
358
+ components: List[GaussianComponent],
359
+ f: Callable,
360
+ F: ArrayLike,
361
+ Q: ArrayLike,
362
+ ) -> List[GaussianComponent]:
363
+ """Convenience function for GSF prediction.
364
+
365
+ Parameters
366
+ ----------
367
+ components : list[GaussianComponent]
368
+ Current mixture components.
369
+ f : callable
370
+ Nonlinear state transition function.
371
+ F : array_like
372
+ Jacobian of f.
373
+ Q : array_like
374
+ Process noise covariance.
375
+
376
+ Returns
377
+ -------
378
+ components_new : list[GaussianComponent]
379
+ Predicted components.
380
+ """
381
+ F = np.asarray(F, dtype=np.float64)
382
+ Q = np.asarray(Q, dtype=np.float64)
383
+
384
+ new_components = []
385
+ for comp in components:
386
+ pred = ekf_predict(comp.x, comp.P, f, F, Q)
387
+ new_components.append(
388
+ GaussianComponent(x=pred.x, P=pred.P, w=comp.w)
389
+ )
390
+
391
+ return new_components
392
+
393
+
394
+ def gaussian_sum_filter_update(
395
+ components: List[GaussianComponent],
396
+ z: ArrayLike,
397
+ h: Callable,
398
+ H: ArrayLike,
399
+ R: ArrayLike,
400
+ ) -> List[GaussianComponent]:
401
+ """Convenience function for GSF update.
402
+
403
+ Parameters
404
+ ----------
405
+ components : list[GaussianComponent]
406
+ Predicted mixture components.
407
+ z : array_like
408
+ Measurement.
409
+ h : callable
410
+ Nonlinear measurement function.
411
+ H : array_like
412
+ Jacobian of h.
413
+ R : array_like
414
+ Measurement noise covariance.
415
+
416
+ Returns
417
+ -------
418
+ components_new : list[GaussianComponent]
419
+ Updated components with adapted weights.
420
+ """
421
+ z = np.asarray(z, dtype=np.float64)
422
+ H = np.asarray(H, dtype=np.float64)
423
+ R = np.asarray(R, dtype=np.float64)
424
+
425
+ likelihoods = []
426
+ updated_components = []
427
+
428
+ for comp in components:
429
+ upd = ekf_update(comp.x, comp.P, z, h, H, R)
430
+ likelihood = upd.likelihood
431
+
432
+ updated_components.append(
433
+ GaussianComponent(x=upd.x, P=upd.P, w=comp.w)
434
+ )
435
+ likelihoods.append(likelihood)
436
+
437
+ # Adapt weights
438
+ likelihoods = np.array(likelihoods)
439
+ weights = np.array([c.w for c in updated_components])
440
+
441
+ weights = weights * likelihoods
442
+ weight_sum = np.sum(weights)
443
+
444
+ if weight_sum > 0:
445
+ weights = weights / weight_sum
446
+ else:
447
+ weights = np.ones(len(updated_components)) / len(updated_components)
448
+
449
+ return [
450
+ GaussianComponent(x=c.x, P=c.P, w=w)
451
+ for c, w in zip(updated_components, weights)
452
+ ]
@@ -4,6 +4,7 @@ Kalman filter family implementations.
4
4
  This module provides:
5
5
  - Linear Kalman filter (predict, update, smoothing)
6
6
  - Extended Kalman filter (EKF)
7
+ - Constrained Extended Kalman filter (CEKF)
7
8
  - Unscented Kalman filter (UKF)
8
9
  - Cubature Kalman filter (CKF)
9
10
  - Information filter
@@ -12,6 +13,12 @@ This module provides:
12
13
  - H-infinity filter (robust filtering)
13
14
  """
14
15
 
16
+ from pytcl.dynamic_estimation.kalman.constrained import (
17
+ ConstraintFunction,
18
+ ConstrainedEKF,
19
+ constrained_ekf_predict,
20
+ constrained_ekf_update,
21
+ )
15
22
  from pytcl.dynamic_estimation.kalman.extended import (
16
23
  ekf_predict,
17
24
  ekf_predict_auto,
@@ -71,6 +78,11 @@ from pytcl.dynamic_estimation.kalman.unscented import (
71
78
  )
72
79
 
73
80
  __all__ = [
81
+ # Constrained EKF
82
+ "ConstraintFunction",
83
+ "ConstrainedEKF",
84
+ "constrained_ekf_predict",
85
+ "constrained_ekf_update",
74
86
  # Linear KF
75
87
  "KalmanState",
76
88
  "KalmanPrediction",