nrl-tracker 0.22.5__py3-none-any.whl → 1.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.8.0.dist-info}/METADATA +57 -10
- {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.8.0.dist-info}/RECORD +86 -69
- pytcl/__init__.py +4 -3
- pytcl/assignment_algorithms/__init__.py +28 -0
- pytcl/assignment_algorithms/dijkstra_min_cost.py +184 -0
- pytcl/assignment_algorithms/gating.py +10 -10
- pytcl/assignment_algorithms/jpda.py +40 -40
- pytcl/assignment_algorithms/nd_assignment.py +379 -0
- pytcl/assignment_algorithms/network_flow.py +464 -0
- pytcl/assignment_algorithms/network_simplex.py +167 -0
- pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
- pytcl/astronomical/__init__.py +104 -3
- pytcl/astronomical/ephemerides.py +14 -11
- pytcl/astronomical/reference_frames.py +865 -56
- pytcl/astronomical/relativity.py +6 -5
- pytcl/astronomical/sgp4.py +710 -0
- pytcl/astronomical/special_orbits.py +532 -0
- pytcl/astronomical/tle.py +558 -0
- pytcl/atmosphere/__init__.py +43 -1
- pytcl/atmosphere/ionosphere.py +512 -0
- pytcl/atmosphere/nrlmsise00.py +809 -0
- pytcl/clustering/dbscan.py +2 -2
- pytcl/clustering/gaussian_mixture.py +3 -3
- pytcl/clustering/hierarchical.py +15 -15
- pytcl/clustering/kmeans.py +4 -4
- pytcl/containers/__init__.py +24 -0
- pytcl/containers/base.py +219 -0
- pytcl/containers/cluster_set.py +12 -2
- pytcl/containers/covertree.py +26 -29
- pytcl/containers/kd_tree.py +94 -29
- pytcl/containers/rtree.py +200 -1
- pytcl/containers/vptree.py +21 -28
- pytcl/coordinate_systems/conversions/geodetic.py +272 -5
- pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
- pytcl/coordinate_systems/projections/__init__.py +1 -1
- pytcl/coordinate_systems/projections/projections.py +2 -2
- pytcl/coordinate_systems/rotations/rotations.py +10 -6
- pytcl/core/__init__.py +18 -0
- pytcl/core/validation.py +333 -2
- pytcl/dynamic_estimation/__init__.py +26 -0
- pytcl/dynamic_estimation/gaussian_sum_filter.py +434 -0
- pytcl/dynamic_estimation/imm.py +14 -14
- pytcl/dynamic_estimation/kalman/__init__.py +30 -0
- pytcl/dynamic_estimation/kalman/constrained.py +382 -0
- pytcl/dynamic_estimation/kalman/extended.py +8 -8
- pytcl/dynamic_estimation/kalman/h_infinity.py +613 -0
- pytcl/dynamic_estimation/kalman/square_root.py +60 -573
- pytcl/dynamic_estimation/kalman/sr_ukf.py +302 -0
- pytcl/dynamic_estimation/kalman/ud_filter.py +410 -0
- pytcl/dynamic_estimation/kalman/unscented.py +8 -6
- pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
- pytcl/dynamic_estimation/rbpf.py +589 -0
- pytcl/gravity/egm.py +13 -0
- pytcl/gravity/spherical_harmonics.py +98 -37
- pytcl/gravity/tides.py +6 -6
- pytcl/logging_config.py +328 -0
- pytcl/magnetism/__init__.py +7 -0
- pytcl/magnetism/emm.py +10 -3
- pytcl/magnetism/wmm.py +260 -23
- pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
- pytcl/mathematical_functions/geometry/geometry.py +5 -5
- pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
- pytcl/mathematical_functions/signal_processing/detection.py +24 -24
- pytcl/mathematical_functions/signal_processing/filters.py +14 -14
- pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
- pytcl/mathematical_functions/special_functions/bessel.py +15 -3
- pytcl/mathematical_functions/special_functions/debye.py +136 -26
- pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
- pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
- pytcl/mathematical_functions/special_functions/hypergeometric.py +81 -15
- pytcl/mathematical_functions/transforms/fourier.py +8 -8
- pytcl/mathematical_functions/transforms/stft.py +12 -12
- pytcl/mathematical_functions/transforms/wavelets.py +9 -9
- pytcl/navigation/geodesy.py +246 -160
- pytcl/navigation/great_circle.py +101 -19
- pytcl/plotting/coordinates.py +7 -7
- pytcl/plotting/tracks.py +2 -2
- pytcl/static_estimation/maximum_likelihood.py +16 -14
- pytcl/static_estimation/robust.py +5 -5
- pytcl/terrain/loaders.py +5 -5
- pytcl/trackers/hypothesis.py +1 -1
- pytcl/trackers/mht.py +9 -9
- pytcl/trackers/multi_target.py +1 -1
- {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.8.0.dist-info}/LICENSE +0 -0
- {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.8.0.dist-info}/WHEEL +0 -0
- {nrl_tracker-0.22.5.dist-info → nrl_tracker-1.8.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,434 @@
|
|
|
1
|
+
"""Gaussian Sum Filter (GSF) for nonlinear state estimation.
|
|
2
|
+
|
|
3
|
+
The Gaussian Sum Filter represents the posterior distribution as a weighted
|
|
4
|
+
mixture of Gaussians. This allows approximation of multi-modal distributions
|
|
5
|
+
and nonlinear systems better than a single EKF.
|
|
6
|
+
|
|
7
|
+
Each component has:
|
|
8
|
+
- Weight (w_i): Probability of that Gaussian
|
|
9
|
+
- Mean (μ_i): Component state estimate
|
|
10
|
+
- Covariance (P_i): Component uncertainty
|
|
11
|
+
|
|
12
|
+
The filter performs:
|
|
13
|
+
1. Predict: Propagate each component independently
|
|
14
|
+
2. Update: Update each component with measurement, adapt weights
|
|
15
|
+
3. Manage: Prune low-weight components, merge similar ones
|
|
16
|
+
|
|
17
|
+
References
|
|
18
|
+
----------
|
|
19
|
+
Bar-Shalom, Y., Li, X. R., & Kirubarajan, T. (2001). Estimation with
|
|
20
|
+
Applications to Tracking and Navigation. Wiley-Interscience.
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
from typing import Callable, List, NamedTuple
|
|
24
|
+
|
|
25
|
+
import numpy as np
|
|
26
|
+
from numpy.typing import ArrayLike, NDArray
|
|
27
|
+
|
|
28
|
+
from pytcl.dynamic_estimation.kalman.extended import ekf_predict, ekf_update
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class GaussianComponent(NamedTuple):
    """One weighted Gaussian in the mixture posterior.

    Attributes
    ----------
    x : ndarray
        Component state estimate (mean).
    P : ndarray
        Component covariance matrix.
    w : float
        Mixture weight (probability mass of this component).
    """

    x: NDArray[np.floating]
    P: NDArray[np.floating]
    w: float
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class GaussianSumFilter:
|
|
40
|
+
"""Gaussian Sum Filter for nonlinear state estimation.
|
|
41
|
+
|
|
42
|
+
A mixture model approach that represents the posterior distribution
|
|
43
|
+
as a weighted sum of Gaussians. Useful for multi-modal distributions
|
|
44
|
+
and nonlinear systems.
|
|
45
|
+
|
|
46
|
+
Attributes
|
|
47
|
+
----------
|
|
48
|
+
components : list[GaussianComponent]
|
|
49
|
+
Current mixture components (state, covariance, weight).
|
|
50
|
+
max_components : int
|
|
51
|
+
Maximum components to maintain (via pruning/merging).
|
|
52
|
+
merge_threshold : float
|
|
53
|
+
KL divergence threshold for merging components.
|
|
54
|
+
prune_threshold : float
|
|
55
|
+
Weight threshold below which to prune components.
|
|
56
|
+
"""
|
|
57
|
+
|
|
58
|
+
def __init__(
|
|
59
|
+
self,
|
|
60
|
+
max_components: int = 5,
|
|
61
|
+
merge_threshold: float = 0.01,
|
|
62
|
+
prune_threshold: float = 1e-3,
|
|
63
|
+
):
|
|
64
|
+
"""Initialize Gaussian Sum Filter.
|
|
65
|
+
|
|
66
|
+
Parameters
|
|
67
|
+
----------
|
|
68
|
+
max_components : int
|
|
69
|
+
Maximum number of Gaussian components to maintain.
|
|
70
|
+
merge_threshold : float
|
|
71
|
+
KL divergence threshold for merging. Components with KL
|
|
72
|
+
divergence below this are merged.
|
|
73
|
+
prune_threshold : float
|
|
74
|
+
Weight threshold for pruning. Components with weight below
|
|
75
|
+
this are removed.
|
|
76
|
+
"""
|
|
77
|
+
self.components: List[GaussianComponent] = []
|
|
78
|
+
self.max_components = max_components
|
|
79
|
+
self.merge_threshold = merge_threshold
|
|
80
|
+
self.prune_threshold = prune_threshold
|
|
81
|
+
|
|
82
|
+
def initialize(
|
|
83
|
+
self,
|
|
84
|
+
x0: ArrayLike,
|
|
85
|
+
P0: ArrayLike,
|
|
86
|
+
num_components: int = 1,
|
|
87
|
+
) -> None:
|
|
88
|
+
"""Initialize filter with initial state.
|
|
89
|
+
|
|
90
|
+
Parameters
|
|
91
|
+
----------
|
|
92
|
+
x0 : array_like
|
|
93
|
+
Initial state estimate, shape (n,).
|
|
94
|
+
P0 : array_like
|
|
95
|
+
Initial covariance, shape (n, n).
|
|
96
|
+
num_components : int
|
|
97
|
+
Number of components to initialize with. If > 1, will
|
|
98
|
+
create multiple components with slightly perturbed means.
|
|
99
|
+
"""
|
|
100
|
+
x0 = np.asarray(x0, dtype=np.float64)
|
|
101
|
+
P0 = np.asarray(P0, dtype=np.float64)
|
|
102
|
+
|
|
103
|
+
self.components = []
|
|
104
|
+
weight = 1.0 / num_components
|
|
105
|
+
|
|
106
|
+
for i in range(num_components):
|
|
107
|
+
if i == 0:
|
|
108
|
+
x = x0.copy()
|
|
109
|
+
else:
|
|
110
|
+
# Slight perturbation for diversity
|
|
111
|
+
x = x0 + np.random.randn(x0.shape[0]) * np.sqrt(np.diag(P0)) * 0.1
|
|
112
|
+
|
|
113
|
+
self.components.append(GaussianComponent(x=x, P=P0.copy(), w=weight))
|
|
114
|
+
|
|
115
|
+
def predict(
|
|
116
|
+
self,
|
|
117
|
+
f: Callable[[NDArray[np.floating]], NDArray[np.floating]],
|
|
118
|
+
F: ArrayLike,
|
|
119
|
+
Q: ArrayLike,
|
|
120
|
+
) -> None:
|
|
121
|
+
"""Predict step: propagate each component.
|
|
122
|
+
|
|
123
|
+
Parameters
|
|
124
|
+
----------
|
|
125
|
+
f : callable
|
|
126
|
+
Nonlinear state transition function f(x).
|
|
127
|
+
F : array_like
|
|
128
|
+
Jacobian of f, shape (n, n).
|
|
129
|
+
Q : array_like
|
|
130
|
+
Process noise covariance, shape (n, n).
|
|
131
|
+
"""
|
|
132
|
+
F = np.asarray(F, dtype=np.float64)
|
|
133
|
+
Q = np.asarray(Q, dtype=np.float64)
|
|
134
|
+
|
|
135
|
+
new_components = []
|
|
136
|
+
for comp in self.components:
|
|
137
|
+
# EKF predict for each component
|
|
138
|
+
pred = ekf_predict(comp.x, comp.P, f, F, Q)
|
|
139
|
+
new_components.append(GaussianComponent(x=pred.x, P=pred.P, w=comp.w))
|
|
140
|
+
|
|
141
|
+
self.components = new_components
|
|
142
|
+
|
|
143
|
+
def update(
|
|
144
|
+
self,
|
|
145
|
+
z: ArrayLike,
|
|
146
|
+
h: Callable[[NDArray[np.floating]], NDArray[np.floating]],
|
|
147
|
+
H: ArrayLike,
|
|
148
|
+
R: ArrayLike,
|
|
149
|
+
) -> None:
|
|
150
|
+
"""Update step: update each component, adapt weights.
|
|
151
|
+
|
|
152
|
+
Parameters
|
|
153
|
+
----------
|
|
154
|
+
z : array_like
|
|
155
|
+
Measurement, shape (m,).
|
|
156
|
+
h : callable
|
|
157
|
+
Nonlinear measurement function h(x).
|
|
158
|
+
H : array_like
|
|
159
|
+
Jacobian of h, shape (m, n).
|
|
160
|
+
R : array_like
|
|
161
|
+
Measurement noise covariance, shape (m, m).
|
|
162
|
+
"""
|
|
163
|
+
z = np.asarray(z, dtype=np.float64)
|
|
164
|
+
H = np.asarray(H, dtype=np.float64)
|
|
165
|
+
R = np.asarray(R, dtype=np.float64)
|
|
166
|
+
|
|
167
|
+
# Update each component and compute likelihoods
|
|
168
|
+
likelihoods = []
|
|
169
|
+
updated_components = []
|
|
170
|
+
|
|
171
|
+
for comp in self.components:
|
|
172
|
+
# EKF update for component
|
|
173
|
+
upd = ekf_update(comp.x, comp.P, z, h, H, R)
|
|
174
|
+
|
|
175
|
+
# Likelihood from this measurement
|
|
176
|
+
likelihood = upd.likelihood
|
|
177
|
+
|
|
178
|
+
updated_components.append(GaussianComponent(x=upd.x, P=upd.P, w=comp.w))
|
|
179
|
+
likelihoods.append(likelihood)
|
|
180
|
+
|
|
181
|
+
# Adapt weights based on measurement likelihood
|
|
182
|
+
likelihoods = np.array(likelihoods)
|
|
183
|
+
weights = np.array([c.w for c in updated_components])
|
|
184
|
+
|
|
185
|
+
# Normalize weights by likelihood (Bayesian update)
|
|
186
|
+
weights = weights * likelihoods
|
|
187
|
+
weight_sum = np.sum(weights)
|
|
188
|
+
|
|
189
|
+
if weight_sum > 0:
|
|
190
|
+
weights = weights / weight_sum
|
|
191
|
+
else:
|
|
192
|
+
# Fallback: equal weights if all likelihoods zero
|
|
193
|
+
weights = np.ones(len(updated_components)) / len(updated_components)
|
|
194
|
+
|
|
195
|
+
# Update components with new weights
|
|
196
|
+
self.components = [
|
|
197
|
+
GaussianComponent(x=c.x, P=c.P, w=w)
|
|
198
|
+
for c, w in zip(updated_components, weights)
|
|
199
|
+
]
|
|
200
|
+
|
|
201
|
+
# Manage components (prune, merge)
|
|
202
|
+
self._prune_components()
|
|
203
|
+
self._merge_components()
|
|
204
|
+
|
|
205
|
+
def _prune_components(self) -> None:
|
|
206
|
+
"""Remove components with weight below threshold."""
|
|
207
|
+
self.components = [c for c in self.components if c.w >= self.prune_threshold]
|
|
208
|
+
|
|
209
|
+
if len(self.components) == 0:
|
|
210
|
+
# Failsafe: keep best component
|
|
211
|
+
self.components = [max(self.components, key=lambda c: c.w)]
|
|
212
|
+
|
|
213
|
+
# Renormalize weights
|
|
214
|
+
total_weight = sum(c.w for c in self.components)
|
|
215
|
+
if total_weight > 0:
|
|
216
|
+
self.components = [
|
|
217
|
+
GaussianComponent(x=c.x, P=c.P, w=c.w / total_weight)
|
|
218
|
+
for c in self.components
|
|
219
|
+
]
|
|
220
|
+
|
|
221
|
+
def _merge_components(self) -> None:
|
|
222
|
+
"""Merge similar components to keep count manageable."""
|
|
223
|
+
while len(self.components) > self.max_components:
|
|
224
|
+
# Find pair with smallest KL divergence
|
|
225
|
+
best_i, best_j, best_kl = 0, 1, float("inf")
|
|
226
|
+
|
|
227
|
+
for i in range(len(self.components)):
|
|
228
|
+
for j in range(i + 1, len(self.components)):
|
|
229
|
+
kl = self._kl_divergence(self.components[i], self.components[j])
|
|
230
|
+
if kl < best_kl:
|
|
231
|
+
best_kl = kl
|
|
232
|
+
best_i = best_j = i
|
|
233
|
+
best_j = j
|
|
234
|
+
|
|
235
|
+
if best_kl < self.merge_threshold:
|
|
236
|
+
# Merge components i and j
|
|
237
|
+
ci = self.components[best_i]
|
|
238
|
+
cj = self.components[best_j]
|
|
239
|
+
|
|
240
|
+
# Merged weight
|
|
241
|
+
w_new = ci.w + cj.w
|
|
242
|
+
|
|
243
|
+
# Merged mean (weighted average)
|
|
244
|
+
x_new = (ci.w * ci.x + cj.w * cj.x) / w_new
|
|
245
|
+
|
|
246
|
+
# Merged covariance (weighted average of covariances
|
|
247
|
+
# plus covariance of means)
|
|
248
|
+
P_new = (ci.w * ci.P + cj.w * cj.P) / w_new
|
|
249
|
+
dx_i = ci.x - x_new
|
|
250
|
+
dx_j = cj.x - x_new
|
|
251
|
+
P_new += (
|
|
252
|
+
ci.w * np.outer(dx_i, dx_i) + cj.w * np.outer(dx_j, dx_j)
|
|
253
|
+
) / w_new
|
|
254
|
+
|
|
255
|
+
# Create merged component
|
|
256
|
+
merged = GaussianComponent(x=x_new, P=P_new, w=w_new)
|
|
257
|
+
|
|
258
|
+
# Replace with merged, remove old
|
|
259
|
+
self.components = [
|
|
260
|
+
c
|
|
261
|
+
for i, c in enumerate(self.components)
|
|
262
|
+
if i != best_i and i != best_j
|
|
263
|
+
]
|
|
264
|
+
self.components.append(merged)
|
|
265
|
+
else:
|
|
266
|
+
# Can't merge more, stop
|
|
267
|
+
break
|
|
268
|
+
|
|
269
|
+
@staticmethod
|
|
270
|
+
def _kl_divergence(c1: GaussianComponent, c2: GaussianComponent) -> float:
|
|
271
|
+
"""Compute KL divergence between two Gaussians.
|
|
272
|
+
|
|
273
|
+
KL(N1 || N2) = 0.5 * [tr(P2^{-1}P1) + (μ2-μ1)^T P2^{-1}
|
|
274
|
+
(μ2-μ1) - n + ln|P2|/|P1|]
|
|
275
|
+
|
|
276
|
+
Parameters
|
|
277
|
+
----------
|
|
278
|
+
c1, c2 : GaussianComponent
|
|
279
|
+
Gaussian components.
|
|
280
|
+
|
|
281
|
+
Returns
|
|
282
|
+
-------
|
|
283
|
+
kl : float
|
|
284
|
+
KL divergence from c1 to c2.
|
|
285
|
+
"""
|
|
286
|
+
dx = c2.x - c1.x
|
|
287
|
+
n = len(c1.x)
|
|
288
|
+
|
|
289
|
+
try:
|
|
290
|
+
P2_inv = np.linalg.inv(c2.P)
|
|
291
|
+
logdet_ratio = np.linalg.slogdet(c2.P)[1] - np.linalg.slogdet(c1.P)[1]
|
|
292
|
+
|
|
293
|
+
trace_term = np.trace(P2_inv @ c1.P)
|
|
294
|
+
quad_term = dx @ P2_inv @ dx
|
|
295
|
+
kl = 0.5 * (trace_term + quad_term - n + logdet_ratio)
|
|
296
|
+
|
|
297
|
+
return float(np.clip(kl, 0, np.inf))
|
|
298
|
+
except np.linalg.LinAlgError:
|
|
299
|
+
# Singular matrix, return large KL
|
|
300
|
+
return 1e6
|
|
301
|
+
|
|
302
|
+
def estimate(self) -> tuple[NDArray[np.floating], NDArray[np.floating]]:
|
|
303
|
+
"""Get overall state estimate (weighted mean and covariance).
|
|
304
|
+
|
|
305
|
+
Returns
|
|
306
|
+
-------
|
|
307
|
+
x : ndarray
|
|
308
|
+
Weighted mean of components.
|
|
309
|
+
P : ndarray
|
|
310
|
+
Weighted covariance of components.
|
|
311
|
+
"""
|
|
312
|
+
if not self.components:
|
|
313
|
+
raise ValueError("No components initialized")
|
|
314
|
+
|
|
315
|
+
# Weighted mean
|
|
316
|
+
x_est = np.zeros_like(self.components[0].x)
|
|
317
|
+
for comp in self.components:
|
|
318
|
+
x_est += comp.w * comp.x
|
|
319
|
+
|
|
320
|
+
# Weighted covariance
|
|
321
|
+
P_est = np.zeros_like(self.components[0].P)
|
|
322
|
+
for comp in self.components:
|
|
323
|
+
dx = comp.x - x_est
|
|
324
|
+
P_est += comp.w * (comp.P + np.outer(dx, dx))
|
|
325
|
+
|
|
326
|
+
return x_est, P_est
|
|
327
|
+
|
|
328
|
+
def get_components(self) -> List[GaussianComponent]:
|
|
329
|
+
"""Get current mixture components.
|
|
330
|
+
|
|
331
|
+
Returns
|
|
332
|
+
-------
|
|
333
|
+
components : list[GaussianComponent]
|
|
334
|
+
List of components with (x, P, w).
|
|
335
|
+
"""
|
|
336
|
+
return self.components.copy()
|
|
337
|
+
|
|
338
|
+
def get_num_components(self) -> int:
|
|
339
|
+
"""Get number of current components."""
|
|
340
|
+
return len(self.components)
|
|
341
|
+
|
|
342
|
+
|
|
343
|
+
def gaussian_sum_filter_predict(
    components: List[GaussianComponent],
    f: Callable[[NDArray[np.floating]], NDArray[np.floating]],
    F: ArrayLike,
    Q: ArrayLike,
) -> List[GaussianComponent]:
    """Convenience function for GSF prediction.

    Propagates every mixture component through one EKF prediction step;
    component weights are left unchanged.

    Parameters
    ----------
    components : list[GaussianComponent]
        Current mixture components.
    f : callable
        Nonlinear state transition function.
    F : array_like
        Jacobian of f.
    Q : array_like
        Process noise covariance.

    Returns
    -------
    components_new : list[GaussianComponent]
        Predicted components.
    """
    F_arr = np.asarray(F, dtype=np.float64)
    Q_arr = np.asarray(Q, dtype=np.float64)

    def _propagate(comp: GaussianComponent) -> GaussianComponent:
        # One independent EKF predict per component; weight carries over.
        pred = ekf_predict(comp.x, comp.P, f, F_arr, Q_arr)
        return GaussianComponent(x=pred.x, P=pred.P, w=comp.w)

    return [_propagate(comp) for comp in components]
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
def gaussian_sum_filter_update(
    components: List[GaussianComponent],
    z: ArrayLike,
    h: Callable[[NDArray[np.floating]], NDArray[np.floating]],
    H: ArrayLike,
    R: ArrayLike,
) -> List[GaussianComponent]:
    """Convenience function for GSF update.

    Applies an EKF measurement update to every component, then rescales
    the weights by each component's measurement likelihood (Bayesian
    mixture update) and renormalizes.

    Parameters
    ----------
    components : list[GaussianComponent]
        Predicted mixture components.
    z : array_like
        Measurement.
    h : callable
        Nonlinear measurement function.
    H : array_like
        Jacobian of h.
    R : array_like
        Measurement noise covariance.

    Returns
    -------
    components_new : list[GaussianComponent]
        Updated components with adapted weights.
    """
    z_arr = np.asarray(z, dtype=np.float64)
    H_arr = np.asarray(H, dtype=np.float64)
    R_arr = np.asarray(R, dtype=np.float64)

    # EKF-update every component; each result carries x, P and likelihood.
    results = [ekf_update(c.x, c.P, z_arr, h, H_arr, R_arr) for c in components]

    # Posterior weight is proportional to prior weight times likelihood.
    raw_weights = np.array([c.w for c in components]) * np.array(
        [r.likelihood for r in results]
    )
    total = np.sum(raw_weights)

    if total > 0:
        new_weights = raw_weights / total
    else:
        # Degenerate case: every likelihood vanished; fall back to uniform.
        new_weights = np.ones(len(components)) / len(components)

    return [
        GaussianComponent(x=r.x, P=r.P, w=w)
        for r, w in zip(results, new_weights)
    ]
|
pytcl/dynamic_estimation/imm.py
CHANGED
|
@@ -12,7 +12,7 @@ The IMM algorithm consists of four steps:
|
|
|
12
12
|
4. Output combination
|
|
13
13
|
"""
|
|
14
14
|
|
|
15
|
-
from typing import List, NamedTuple, Optional
|
|
15
|
+
from typing import Any, List, NamedTuple, Optional
|
|
16
16
|
|
|
17
17
|
import numpy as np
|
|
18
18
|
from numpy.typing import ArrayLike, NDArray
|
|
@@ -101,7 +101,7 @@ class IMMUpdate(NamedTuple):
|
|
|
101
101
|
def compute_mixing_probabilities(
|
|
102
102
|
mode_probs: ArrayLike,
|
|
103
103
|
transition_matrix: ArrayLike,
|
|
104
|
-
) -> tuple[NDArray, NDArray]:
|
|
104
|
+
) -> tuple[NDArray[Any], NDArray[Any]]:
|
|
105
105
|
"""
|
|
106
106
|
Compute mixing probabilities and predicted mode probabilities.
|
|
107
107
|
|
|
@@ -143,10 +143,10 @@ def compute_mixing_probabilities(
|
|
|
143
143
|
|
|
144
144
|
|
|
145
145
|
def mix_states(
|
|
146
|
-
mode_states: List[NDArray],
|
|
147
|
-
mode_covs: List[NDArray],
|
|
148
|
-
mixing_probs: NDArray,
|
|
149
|
-
) -> tuple[List[NDArray], List[NDArray]]:
|
|
146
|
+
mode_states: List[NDArray[Any]],
|
|
147
|
+
mode_covs: List[NDArray[Any]],
|
|
148
|
+
mixing_probs: NDArray[Any],
|
|
149
|
+
) -> tuple[List[NDArray[Any]], List[NDArray[Any]]]:
|
|
150
150
|
"""
|
|
151
151
|
Mix states and covariances for interaction step.
|
|
152
152
|
|
|
@@ -197,10 +197,10 @@ def mix_states(
|
|
|
197
197
|
|
|
198
198
|
|
|
199
199
|
def combine_estimates(
|
|
200
|
-
mode_states: List[NDArray],
|
|
201
|
-
mode_covs: List[NDArray],
|
|
202
|
-
mode_probs: NDArray,
|
|
203
|
-
) -> tuple[NDArray, NDArray]:
|
|
200
|
+
mode_states: List[NDArray[Any]],
|
|
201
|
+
mode_covs: List[NDArray[Any]],
|
|
202
|
+
mode_probs: NDArray[Any],
|
|
203
|
+
) -> tuple[NDArray[Any], NDArray[Any]]:
|
|
204
204
|
"""
|
|
205
205
|
Combine mode-conditioned estimates into overall estimate.
|
|
206
206
|
|
|
@@ -546,10 +546,10 @@ class IMMEstimator:
|
|
|
546
546
|
self.mode_covs = [np.eye(state_dim) for _ in range(n_modes)]
|
|
547
547
|
|
|
548
548
|
# Mode-specific models (must be set by user)
|
|
549
|
-
self.F_list: List[NDArray] = [np.eye(state_dim) for _ in range(n_modes)]
|
|
550
|
-
self.Q_list: List[NDArray] = [np.eye(state_dim) for _ in range(n_modes)]
|
|
551
|
-
self.H_list: List[NDArray] = []
|
|
552
|
-
self.R_list: List[NDArray] = []
|
|
549
|
+
self.F_list: List[NDArray[Any]] = [np.eye(state_dim) for _ in range(n_modes)]
|
|
550
|
+
self.Q_list: List[NDArray[Any]] = [np.eye(state_dim) for _ in range(n_modes)]
|
|
551
|
+
self.H_list: List[NDArray[Any]] = []
|
|
552
|
+
self.R_list: List[NDArray[Any]] = []
|
|
553
553
|
|
|
554
554
|
# Combined estimates
|
|
555
555
|
self.x = np.zeros(state_dim)
|
|
@@ -4,13 +4,21 @@ Kalman filter family implementations.
|
|
|
4
4
|
This module provides:
|
|
5
5
|
- Linear Kalman filter (predict, update, smoothing)
|
|
6
6
|
- Extended Kalman filter (EKF)
|
|
7
|
+
- Constrained Extended Kalman filter (CEKF)
|
|
7
8
|
- Unscented Kalman filter (UKF)
|
|
8
9
|
- Cubature Kalman filter (CKF)
|
|
9
10
|
- Information filter
|
|
10
11
|
- Square-root Kalman filters (numerically stable)
|
|
11
12
|
- U-D factorization filter (Bierman's method)
|
|
13
|
+
- H-infinity filter (robust filtering)
|
|
12
14
|
"""
|
|
13
15
|
|
|
16
|
+
from pytcl.dynamic_estimation.kalman.constrained import (
|
|
17
|
+
ConstrainedEKF,
|
|
18
|
+
ConstraintFunction,
|
|
19
|
+
constrained_ekf_predict,
|
|
20
|
+
constrained_ekf_update,
|
|
21
|
+
)
|
|
14
22
|
from pytcl.dynamic_estimation.kalman.extended import (
|
|
15
23
|
ekf_predict,
|
|
16
24
|
ekf_predict_auto,
|
|
@@ -19,6 +27,15 @@ from pytcl.dynamic_estimation.kalman.extended import (
|
|
|
19
27
|
iterated_ekf_update,
|
|
20
28
|
numerical_jacobian,
|
|
21
29
|
)
|
|
30
|
+
from pytcl.dynamic_estimation.kalman.h_infinity import (
|
|
31
|
+
HInfinityPrediction,
|
|
32
|
+
HInfinityUpdate,
|
|
33
|
+
extended_hinf_update,
|
|
34
|
+
find_min_gamma,
|
|
35
|
+
hinf_predict,
|
|
36
|
+
hinf_predict_update,
|
|
37
|
+
hinf_update,
|
|
38
|
+
)
|
|
22
39
|
from pytcl.dynamic_estimation.kalman.linear import (
|
|
23
40
|
KalmanPrediction,
|
|
24
41
|
KalmanState,
|
|
@@ -61,6 +78,11 @@ from pytcl.dynamic_estimation.kalman.unscented import (
|
|
|
61
78
|
)
|
|
62
79
|
|
|
63
80
|
__all__ = [
|
|
81
|
+
# Constrained EKF
|
|
82
|
+
"ConstraintFunction",
|
|
83
|
+
"ConstrainedEKF",
|
|
84
|
+
"constrained_ekf_predict",
|
|
85
|
+
"constrained_ekf_update",
|
|
64
86
|
# Linear KF
|
|
65
87
|
"KalmanState",
|
|
66
88
|
"KalmanPrediction",
|
|
@@ -108,4 +130,12 @@ __all__ = [
|
|
|
108
130
|
# Square-root UKF
|
|
109
131
|
"sr_ukf_predict",
|
|
110
132
|
"sr_ukf_update",
|
|
133
|
+
# H-infinity filter
|
|
134
|
+
"HInfinityPrediction",
|
|
135
|
+
"HInfinityUpdate",
|
|
136
|
+
"hinf_predict",
|
|
137
|
+
"hinf_update",
|
|
138
|
+
"hinf_predict_update",
|
|
139
|
+
"extended_hinf_update",
|
|
140
|
+
"find_min_gamma",
|
|
111
141
|
]
|