nrl-tracker 1.6.0__py3-none-any.whl → 1.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/METADATA +14 -10
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/RECORD +75 -68
- pytcl/__init__.py +2 -2
- pytcl/assignment_algorithms/__init__.py +28 -0
- pytcl/assignment_algorithms/gating.py +10 -10
- pytcl/assignment_algorithms/jpda.py +40 -40
- pytcl/assignment_algorithms/nd_assignment.py +379 -0
- pytcl/assignment_algorithms/network_flow.py +371 -0
- pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
- pytcl/astronomical/__init__.py +35 -0
- pytcl/astronomical/ephemerides.py +14 -11
- pytcl/astronomical/reference_frames.py +110 -4
- pytcl/astronomical/relativity.py +6 -5
- pytcl/astronomical/special_orbits.py +532 -0
- pytcl/atmosphere/__init__.py +11 -0
- pytcl/atmosphere/nrlmsise00.py +809 -0
- pytcl/clustering/dbscan.py +2 -2
- pytcl/clustering/gaussian_mixture.py +3 -3
- pytcl/clustering/hierarchical.py +15 -15
- pytcl/clustering/kmeans.py +4 -4
- pytcl/containers/base.py +3 -3
- pytcl/containers/cluster_set.py +12 -2
- pytcl/containers/covertree.py +5 -3
- pytcl/containers/rtree.py +1 -1
- pytcl/containers/vptree.py +4 -2
- pytcl/coordinate_systems/conversions/geodetic.py +272 -5
- pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
- pytcl/coordinate_systems/projections/projections.py +2 -2
- pytcl/coordinate_systems/rotations/rotations.py +10 -6
- pytcl/core/validation.py +3 -3
- pytcl/dynamic_estimation/__init__.py +26 -0
- pytcl/dynamic_estimation/gaussian_sum_filter.py +434 -0
- pytcl/dynamic_estimation/imm.py +14 -14
- pytcl/dynamic_estimation/kalman/__init__.py +12 -0
- pytcl/dynamic_estimation/kalman/constrained.py +382 -0
- pytcl/dynamic_estimation/kalman/extended.py +8 -8
- pytcl/dynamic_estimation/kalman/h_infinity.py +2 -2
- pytcl/dynamic_estimation/kalman/square_root.py +8 -2
- pytcl/dynamic_estimation/kalman/sr_ukf.py +3 -3
- pytcl/dynamic_estimation/kalman/ud_filter.py +11 -5
- pytcl/dynamic_estimation/kalman/unscented.py +8 -6
- pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
- pytcl/dynamic_estimation/rbpf.py +589 -0
- pytcl/gravity/spherical_harmonics.py +3 -3
- pytcl/gravity/tides.py +6 -6
- pytcl/logging_config.py +3 -3
- pytcl/magnetism/emm.py +10 -3
- pytcl/magnetism/wmm.py +4 -4
- pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
- pytcl/mathematical_functions/geometry/geometry.py +5 -5
- pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
- pytcl/mathematical_functions/signal_processing/detection.py +24 -24
- pytcl/mathematical_functions/signal_processing/filters.py +14 -14
- pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
- pytcl/mathematical_functions/special_functions/bessel.py +15 -3
- pytcl/mathematical_functions/special_functions/debye.py +5 -1
- pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
- pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
- pytcl/mathematical_functions/special_functions/hypergeometric.py +6 -4
- pytcl/mathematical_functions/transforms/fourier.py +8 -8
- pytcl/mathematical_functions/transforms/stft.py +12 -12
- pytcl/mathematical_functions/transforms/wavelets.py +9 -9
- pytcl/navigation/geodesy.py +3 -3
- pytcl/navigation/great_circle.py +5 -5
- pytcl/plotting/coordinates.py +7 -7
- pytcl/plotting/tracks.py +2 -2
- pytcl/static_estimation/maximum_likelihood.py +16 -14
- pytcl/static_estimation/robust.py +5 -5
- pytcl/terrain/loaders.py +5 -5
- pytcl/trackers/hypothesis.py +1 -1
- pytcl/trackers/mht.py +9 -9
- pytcl/trackers/multi_target.py +1 -1
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.6.0.dist-info → nrl_tracker-1.7.1.dist-info}/top_level.txt +0 -0
pytcl/dynamic_estimation/kalman/constrained.py (new file)
@@ -0,0 +1,382 @@
+"""
+Constrained Extended Kalman Filter (CEKF).
+
+Extends the Extended Kalman Filter to enforce constraints on the state
+estimate. Uses Lagrange multiplier method to project onto constraint manifold
+while maintaining positive definite covariance.
+
+References
+----------
+.. [1] Simon, D. (2006). Optimal State Estimation: Kalman, H∞, and Nonlinear
+   Approaches. Wiley-Interscience.
+.. [2] Simon, D. & Simon, D. L. (2010). Constrained Kalman filtering via
+   density function truncation. Journal of Guidance, Control, and Dynamics.
+"""
+
+from typing import Any, Callable, Optional
+
+import numpy as np
+from numpy.typing import ArrayLike, NDArray
+
+from pytcl.dynamic_estimation.kalman.extended import ekf_predict, ekf_update
+from pytcl.dynamic_estimation.kalman.linear import KalmanPrediction, KalmanUpdate
+
+
+class ConstraintFunction:
+    """Base class for state constraints."""
+
+    def __init__(
+        self,
+        g: Callable[[NDArray[Any]], NDArray[Any]],
+        G: Optional[Callable[[NDArray[Any]], NDArray[Any]]] = None,
+        constraint_type: str = "inequality",
+    ):
+        """
+        Define a constraint: g(x) ≤ 0 (inequality) or g(x) = 0 (equality).
+
+        Parameters
+        ----------
+        g : callable
+            Constraint function: g(x) -> scalar or array
+            Inequality: g(x) ≤ 0
+            Equality: g(x) = 0
+        G : callable, optional
+            Jacobian of g with respect to x: ∂g/∂x
+            If None, computed numerically.
+        constraint_type : {'inequality', 'equality'}
+            Constraint type.
+        """
+        self.g = g
+        self.G = G
+        self.constraint_type = constraint_type
+
+    def evaluate(self, x: NDArray[Any]) -> NDArray[Any]:
+        """Evaluate constraint at state x."""
+        return np.atleast_1d(np.asarray(self.g(x), dtype=np.float64))
+
+    def jacobian(self, x: NDArray[Any]) -> NDArray[Any]:
+        """Compute constraint Jacobian at x."""
+        if self.G is not None:
+            return np.atleast_2d(np.asarray(self.G(x), dtype=np.float64))
+        else:
+            # Numerical differentiation
+            eps = 1e-6
+            n = len(x)
+            g_x = self.evaluate(x)
+            m = len(g_x)
+            J = np.zeros((m, n))
+            for i in range(n):
+                x_plus = x.copy()
+                x_plus[i] += eps
+                g_plus = self.evaluate(x_plus)
+                J[:, i] = (g_plus - g_x) / eps
+            return J
+
+    def is_satisfied(self, x: NDArray[Any], tol: float = 1e-6) -> bool:
+        """Check if constraint is satisfied."""
+        g_val = self.evaluate(x)
+        if self.constraint_type == "inequality":
+            return np.all(g_val <= tol)
+        else:  # equality
+            return np.allclose(g_val, 0, atol=tol)
+
+
+class ConstrainedEKF:
+    """
+    Extended Kalman Filter with state constraints.
+
+    Enforces linear and/or nonlinear constraints on state estimate using
+    Lagrange multiplier method with covariance projection.
+
+    Attributes
+    ----------
+    constraints : list of ConstraintFunction
+        List of active constraints.
+    """
+
+    def __init__(self) -> None:
+        """Initialize Constrained EKF."""
+        self.constraints: list[ConstraintFunction] = []
+
+    def add_constraint(self, constraint: ConstraintFunction) -> None:
+        """
+        Add a constraint to the filter.
+
+        Parameters
+        ----------
+        constraint : ConstraintFunction
+            Constraint to enforce.
+        """
+        self.constraints.append(constraint)
+
+    def predict(
+        self,
+        x: ArrayLike,
+        P: ArrayLike,
+        f: Callable[[NDArray[Any]], NDArray[Any]],
+        F: ArrayLike,
+        Q: ArrayLike,
+    ) -> KalmanPrediction:
+        """
+        Constrained EKF prediction step.
+
+        Performs standard EKF prediction (constraints not enforced here,
+        only checked). Constraint enforcement happens in update step.
+
+        Parameters
+        ----------
+        x : array_like
+            Current state estimate, shape (n,).
+        P : array_like
+            Current state covariance, shape (n, n).
+        f : callable
+            Nonlinear state transition function.
+        F : array_like
+            Jacobian of f at current state.
+        Q : array_like
+            Process noise covariance, shape (n, n).
+
+        Returns
+        -------
+        result : KalmanPrediction
+            Predicted state and covariance.
+        """
+        return ekf_predict(x, P, f, F, Q)
+
+    def update(
+        self,
+        x: ArrayLike,
+        P: ArrayLike,
+        z: ArrayLike,
+        h: Callable[[NDArray[Any]], NDArray[Any]],
+        H: ArrayLike,
+        R: ArrayLike,
+    ) -> KalmanUpdate:
+        """
+        Constrained EKF update step.
+
+        Performs standard EKF update, then projects result onto constraint
+        manifold.
+
+        Parameters
+        ----------
+        x : array_like
+            Predicted state estimate, shape (n,).
+        P : array_like
+            Predicted state covariance, shape (n, n).
+        z : array_like
+            Measurement, shape (m,).
+        h : callable
+            Nonlinear measurement function.
+        H : array_like
+            Jacobian of h at current state.
+        R : array_like
+            Measurement noise covariance, shape (m, m).
+
+        Returns
+        -------
+        result : KalmanUpdate
+            Updated state and covariance (constrained).
+        """
+        # Standard EKF update
+        result = ekf_update(x, P, z, h, H, R)
+        x_upd = result.x
+        P_upd = result.P
+
+        # Apply constraint projection
+        if self.constraints:
+            x_upd, P_upd = self._project_onto_constraints(x_upd, P_upd)
+
+        return KalmanUpdate(
+            x=x_upd,
+            P=P_upd,
+            y=result.y,
+            S=result.S,
+            K=result.K,
+            likelihood=result.likelihood,
+        )
+
+    def _project_onto_constraints(
+        self,
+        x: NDArray[Any],
+        P: NDArray[Any],
+        max_iter: int = 10,
+        tol: float = 1e-6,
+    ) -> tuple[NDArray[Any], NDArray[Any]]:
+        """
+        Project state and covariance onto constraint manifold.
+
+        Uses iterative Lagrange multiplier method with covariance
+        projection to enforce constraints while maintaining positive
+        definiteness.
+
+        Parameters
+        ----------
+        x : ndarray
+            State estimate, shape (n,).
+        P : ndarray
+            Covariance matrix, shape (n, n).
+        max_iter : int
+            Maximum iterations for constraint projection.
+        tol : float
+            Convergence tolerance.
+
+        Returns
+        -------
+        x_proj : ndarray
+            Constrained state estimate.
+        P_proj : ndarray
+            Projected covariance.
+        """
+        x_proj = x.copy()
+        P_proj = P.copy()
+
+        # Check which constraints are violated
+        violated: list[ConstraintFunction] = [
+            c for c in self.constraints if not c.is_satisfied(x_proj)
+        ]
+
+        if not violated:
+            return x_proj, P_proj
+
+        # Iterative projection
+        for iteration in range(max_iter):
+            converged = True
+
+            for constraint in violated:
+                g_val = constraint.evaluate(x_proj)
+                G = constraint.jacobian(x_proj)
+
+                # Only process violated constraints
+                if constraint.constraint_type == "inequality":
+                    mask = g_val > tol
+                else:
+                    mask = np.abs(g_val) > tol
+
+                if not np.any(mask):
+                    continue
+
+                converged = False
+
+                # Lagrange multiplier for this constraint
+                # λ = -(G P Gᵀ + μ)⁻¹ G (x - x_ref)
+                # where x_ref is desired state (we use x)
+
+                GP = G @ P_proj
+                GPGt = GP @ G.T
+
+                # Add small regularization for numerical stability
+                mu = np.eye(GPGt.shape[0]) * 1e-6
+
+                try:
+                    GPGt_inv = np.linalg.inv(GPGt + mu)
+                    lam = -GPGt_inv @ (G @ x_proj + g_val)
+
+                    # State correction
+                    x_corr = P_proj @ G.T @ lam
+                    x_proj = x_proj + x_corr
+
+                    # Covariance projection
+                    # P_proj = P - P G^T (G P G^T)^{-1} G P
+                    P_proj = P_proj - GP.T @ GPGt_inv @ GP
+
+                    # Ensure symmetry
+                    P_proj = (P_proj + P_proj.T) / 2
+
+                    # Enforce positive definiteness
+                    eigvals, eigvecs = np.linalg.eigh(P_proj)
+                    if np.any(eigvals < 0):
+                        eigvals[eigvals < 1e-10] = 1e-10
+                        P_proj = eigvecs @ np.diag(eigvals) @ eigvecs.T
+
+                except np.linalg.LinAlgError:
+                    # If inversion fails, use pseudoinverse
+                    GPGt_pinv = np.linalg.pinv(GPGt)
+                    lam = -GPGt_pinv @ (G @ x_proj + g_val)
+                    x_proj = x_proj + P_proj @ G.T @ lam
+
+            if converged:
+                break
+
+        return x_proj, P_proj
+
+
+def constrained_ekf_predict(
+    x: ArrayLike,
+    P: ArrayLike,
+    f: Callable[[NDArray[Any]], NDArray[Any]],
+    F: ArrayLike,
+    Q: ArrayLike,
+) -> KalmanPrediction:
+    """
+    Convenience function for constrained EKF prediction.
+
+    Parameters
+    ----------
+    x : array_like
+        Current state estimate.
+    P : array_like
+        Current covariance.
+    f : callable
+        Nonlinear dynamics function.
+    F : array_like
+        Jacobian of f.
+    Q : array_like
+        Process noise covariance.
+
+    Returns
+    -------
+    result : KalmanPrediction
+        Predicted state and covariance.
+    """
+    cekf = ConstrainedEKF()
+    return cekf.predict(x, P, f, F, Q)
+
+
+def constrained_ekf_update(
+    x: ArrayLike,
+    P: ArrayLike,
+    z: ArrayLike,
+    h: Callable[[NDArray[Any]], NDArray[Any]],
+    H: ArrayLike,
+    R: ArrayLike,
+    constraints: Optional[list[ConstraintFunction]] = None,
+) -> KalmanUpdate:
+    """
+    Convenience function for constrained EKF update.
+
+    Parameters
+    ----------
+    x : array_like
+        Predicted state.
+    P : array_like
+        Predicted covariance.
+    z : array_like
+        Measurement.
+    h : callable
+        Nonlinear measurement function.
+    H : array_like
+        Jacobian of h.
+    R : array_like
+        Measurement noise covariance.
+    constraints : list, optional
+        List of ConstraintFunction objects.
+
+    Returns
+    -------
+    result : KalmanUpdate
+        Updated state and covariance.
+    """
+    cekf = ConstrainedEKF()
+    if constraints:
+        for c in constraints:
+            cekf.add_constraint(c)
+    return cekf.update(x, P, z, h, H, R)
+
+
+__all__ = [
+    "ConstraintFunction",
+    "ConstrainedEKF",
+    "constrained_ekf_predict",
+    "constrained_ekf_update",
+]
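A minimal usage sketch of the new constrained-EKF convenience API above. The two-state constant-velocity model, the numbers, and the lambdas are illustrative only; the import path follows the file location shown in this diff.

import numpy as np
from pytcl.dynamic_estimation.kalman.constrained import (
    ConstraintFunction,
    constrained_ekf_update,
)

# Illustrative 2-state model: x = [position, velocity], position-only measurement.
x = np.array([0.2, 1.0])      # predicted state
P = np.eye(2) * 0.5           # predicted covariance
z = np.array([-0.3])          # measurement that would pull the position negative
h = lambda s: s[:1]           # measurement function
H = np.array([[1.0, 0.0]])    # measurement Jacobian
R = np.array([[0.1]])

# Enforce position >= 0 using the module's inequality convention g(x) <= 0.
nonneg = ConstraintFunction(
    g=lambda s: np.array([-s[0]]),
    G=lambda s: np.array([[-1.0, 0.0]]),
    constraint_type="inequality",
)

result = constrained_ekf_update(x, P, z, h, H, R, constraints=[nonneg])
print(result.x)  # state after projection toward the feasible set
print(result.P)  # covariance after the constraint projection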
pytcl/dynamic_estimation/kalman/extended.py
@@ -5,7 +5,7 @@ The EKF handles nonlinear dynamics and/or measurements by linearizing
 around the current state estimate using Jacobians.
 """
 
-from typing import Callable
+from typing import Any, Callable
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -16,7 +16,7 @@ from pytcl.dynamic_estimation.kalman.linear import KalmanPrediction, KalmanUpdat
 def ekf_predict(
     x: ArrayLike,
     P: ArrayLike,
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     F: ArrayLike,
     Q: ArrayLike,
 ) -> KalmanPrediction:
@@ -86,7 +86,7 @@ def ekf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
     H: ArrayLike,
     R: ArrayLike,
 ) -> KalmanUpdate:
@@ -183,7 +183,7 @@ def ekf_update(
 
 
 def numerical_jacobian(
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     x: ArrayLike,
     dx: float = 1e-7,
 ) -> NDArray[np.floating]:
@@ -239,7 +239,7 @@ def numerical_jacobian(
 def ekf_predict_auto(
     x: ArrayLike,
     P: ArrayLike,
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     Q: ArrayLike,
     dx: float = 1e-7,
 ) -> KalmanPrediction:
@@ -273,7 +273,7 @@ def ekf_update_auto(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
     R: ArrayLike,
     dx: float = 1e-7,
 ) -> KalmanUpdate:
@@ -309,8 +309,8 @@ def iterated_ekf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
-    H_func: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
+    H_func: Callable[[NDArray[Any]], NDArray[Any]],
     R: ArrayLike,
     max_iter: int = 10,
     tol: float = 1e-6,
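For context, the Callable[[NDArray[Any]], NDArray[Any]] parameters annotated above are ordinary state-to-state functions. A short sketch of the kind of callable these signatures describe, fed to the numerical_jacobian helper shown in this file; the drag-damped constant-velocity model is illustrative, not from the package.

import numpy as np
from numpy.typing import NDArray
from pytcl.dynamic_estimation.kalman.extended import numerical_jacobian

dt = 0.1

def f(x: NDArray) -> NDArray:
    """Illustrative nonlinear transition: constant velocity with a mild drag term."""
    px, vx = x
    return np.array([px + vx * dt, vx - 0.05 * vx**2 * dt])

x0 = np.array([1.0, 2.0])
F = numerical_jacobian(f, x0)   # finite-difference Jacobian, dx defaults to 1e-7
print(F.shape)                  # (2, 2)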
pytcl/dynamic_estimation/kalman/h_infinity.py
@@ -22,7 +22,7 @@ References
        Proc. IEEE CDC, 1992.
 """
 
-from typing import NamedTuple, Optional
+from typing import Any, Callable, NamedTuple, Optional
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -379,7 +379,7 @@ def extended_hinf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h:
+    h: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
     H: ArrayLike,
     R: ArrayLike,
     gamma: float,
pytcl/dynamic_estimation/kalman/square_root.py
@@ -77,7 +77,9 @@ class SRKalmanUpdate(NamedTuple):
     likelihood: float
 
 
-def cholesky_update(S: NDArray, v: NDArray, sign: float = 1.0) -> NDArray:
+def cholesky_update(
+    S: NDArray[np.floating], v: NDArray[np.floating], sign: float = 1.0
+) -> NDArray[np.floating]:
     """
     Rank-1 Cholesky update/downdate.
 
@@ -148,7 +150,11 @@ def cholesky_update(S: NDArray, v: NDArray, sign: float = 1.0) -> NDArray:
     return S
 
 
-def qr_update(
+def qr_update(
+    S_x: NDArray[np.floating],
+    S_noise: NDArray[np.floating],
+    F: Optional[NDArray[np.floating]] = None,
+) -> NDArray[np.floating]:
     """
     QR-based covariance square root update.
 
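The widened cholesky_update signature above takes a Cholesky factor S, a vector v, and a sign selecting update (+1) or downdate (-1). A pure-NumPy sketch of the identity such a rank-1 update maintains; it recomputes the factorization from scratch purely for illustration, and whether the package stores S upper- or lower-triangular should be checked against its docstring.

import numpy as np

P = np.array([[4.0, 1.0], [1.0, 3.0]])
v = np.array([0.5, -0.2])

L = np.linalg.cholesky(P)                       # lower-triangular factor of P
L_up = np.linalg.cholesky(P + np.outer(v, v))   # factor after the rank-1 update

# A rank-1 Cholesky update routine returns the second factor without ever
# reforming P + v v^T, which is what keeps square-root filters cheap and stable.
assert np.allclose(L_up @ L_up.T, P + np.outer(v, v))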
pytcl/dynamic_estimation/kalman/sr_ukf.py
@@ -14,7 +14,7 @@ References
        Estimation," Proceedings of the IEEE, 2004.
 """
 
-from typing import Callable
+from typing import Any, Callable
 
 import numpy as np
 import scipy.linalg
@@ -30,7 +30,7 @@ from pytcl.dynamic_estimation.kalman.square_root import (
 def sr_ukf_predict(
     x: ArrayLike,
     S: ArrayLike,
-    f: Callable,
+    f: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
     S_Q: ArrayLike,
     alpha: float = 1e-3,
     beta: float = 2.0,
@@ -143,7 +143,7 @@ def sr_ukf_update(
     x: ArrayLike,
     S: ArrayLike,
     z: ArrayLike,
-    h: Callable,
+    h: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
     S_R: ArrayLike,
     alpha: float = 1e-3,
     beta: float = 2.0,
pytcl/dynamic_estimation/kalman/ud_filter.py
@@ -41,7 +41,7 @@ class UDState(NamedTuple):
     D: NDArray[np.floating]
 
 
-def ud_factorize(P: ArrayLike) -> tuple[NDArray, NDArray]:
+def ud_factorize(P: ArrayLike) -> tuple[NDArray[np.floating], NDArray[np.floating]]:
     """
     Compute U-D factorization of a symmetric positive definite matrix.
 
@@ -91,7 +91,7 @@ def ud_factorize(P: ArrayLike) -> tuple[NDArray, NDArray]:
     return U, D
 
 
-def ud_reconstruct(U: ArrayLike, D: ArrayLike) -> NDArray:
+def ud_reconstruct(U: ArrayLike, D: ArrayLike) -> NDArray[np.floating]:
     """
     Reconstruct covariance matrix from U-D factors.
 
@@ -128,7 +128,7 @@ def ud_predict(
     D: ArrayLike,
     F: ArrayLike,
     Q: ArrayLike,
-) -> tuple[NDArray, NDArray, NDArray]:
+) -> tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]]:
     """
     U-D filter prediction step.
 
@@ -193,7 +193,7 @@ def ud_update_scalar(
     z: float,
     h: ArrayLike,
     r: float,
-) -> tuple[NDArray, NDArray, NDArray]:
+) -> tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]]:
     """
     U-D filter scalar measurement update (Bierman's algorithm).
 
@@ -290,7 +290,13 @@ def ud_update(
     z: ArrayLike,
     H: ArrayLike,
     R: ArrayLike,
-) -> tuple[
+) -> tuple[
+    NDArray[np.floating],
+    NDArray[np.floating],
+    NDArray[np.floating],
+    NDArray[np.floating],
+    float,
+]:
     """
     U-D filter vector measurement update.
 
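The retyped ud_factorize/ud_reconstruct pair above can be sanity-checked with a simple round trip. A short sketch; the covariance matrix is illustrative, and the import path follows the file layout in this diff (D may be returned as a vector or a diagonal matrix, which ud_reconstruct handles either way per its signature).

import numpy as np
from pytcl.dynamic_estimation.kalman.ud_filter import ud_factorize, ud_reconstruct

P = np.array([[2.0, 0.3, 0.0],
              [0.3, 1.5, 0.2],
              [0.0, 0.2, 1.0]])   # symmetric positive definite

U, D = ud_factorize(P)            # factors of P = U D U^T (Bierman's U-D decomposition)
P_back = ud_reconstruct(U, D)     # should reproduce the original covariance

assert np.allclose(P_back, P)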
pytcl/dynamic_estimation/kalman/unscented.py
@@ -5,7 +5,7 @@ The UKF uses the unscented transform to propagate the mean and covariance
 through nonlinear functions without requiring Jacobian computation.
 """
 
-from typing import Callable, NamedTuple, Optional, Tuple
+from typing import Any, Callable, NamedTuple, Optional, Tuple
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -226,7 +226,7 @@ def unscented_transform(
 def ukf_predict(
     x: ArrayLike,
     P: ArrayLike,
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     Q: ArrayLike,
     alpha: float = 1e-3,
     beta: float = 2.0,
@@ -292,7 +292,7 @@ def ukf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
     R: ArrayLike,
     alpha: float = 1e-3,
     beta: float = 2.0,
@@ -382,7 +382,9 @@ def ukf_update(
     )
 
 
-def ckf_spherical_cubature_points(n: int) -> Tuple[NDArray, NDArray]:
+def ckf_spherical_cubature_points(
+    n: int,
+) -> tuple[NDArray[np.floating], NDArray[np.floating]]:
     """
     Generate cubature points for Cubature Kalman Filter.
 
@@ -418,7 +420,7 @@ def ckf_spherical_cubature_points(n: int) -> Tuple[NDArray, NDArray]:
 def ckf_predict(
     x: ArrayLike,
     P: ArrayLike,
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     Q: ArrayLike,
 ) -> KalmanPrediction:
     """
@@ -487,7 +489,7 @@ def ckf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
     R: ArrayLike,
 ) -> KalmanUpdate:
     """
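The cubature-filter signatures above, ckf_predict(x, P, f, Q) and ckf_update(x, P, z, h, R), pair the same way as the UKF functions. A small usage sketch; the dynamics and range-style measurement are illustrative, and the .x/.P field names on KalmanPrediction are assumed by analogy with the KalmanUpdate fields seen elsewhere in this diff.

import numpy as np
from pytcl.dynamic_estimation.kalman.unscented import ckf_predict, ckf_update

dt = 1.0
x = np.array([10.0, 1.0])          # illustrative [range, range-rate] state
P = np.diag([4.0, 1.0])
Q = np.diag([0.01, 0.01])
R = np.array([[0.5]])

f = lambda s: np.array([s[0] + s[1] * dt, s[1]])     # constant-velocity dynamics
h = lambda s: np.array([np.sqrt(s[0] ** 2 + 1.0)])   # nonlinear measurement

pred = ckf_predict(x, P, f, Q)
upd = ckf_update(pred.x, pred.P, np.array([10.8]), h, R)
print(upd.x, upd.likelihood)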
pytcl/dynamic_estimation/particle_filters/bootstrap.py
@@ -5,7 +5,7 @@ This module provides particle filtering algorithms for nonlinear/non-Gaussian
 state estimation.
 """
 
-from typing import Callable, NamedTuple, Optional, Tuple
+from typing import Any, Callable, NamedTuple, Optional, Tuple
 
 import numpy as np
 from numba import njit
@@ -102,9 +102,9 @@ def resample_systematic(
 
 @njit(cache=True)
 def _resample_residual_deterministic(
-    particles: np.ndarray,
-    floor_Nw: np.ndarray,
-) -> Tuple[np.ndarray, int]:
+    particles: np.ndarray[Any, Any],
+    floor_Nw: np.ndarray[Any, Any],
+) -> Tuple[np.ndarray[Any, Any], int]:
     """JIT-compiled deterministic copy portion of residual resampling."""
     N = particles.shape[0]
     n = particles.shape[1]
@@ -196,8 +196,8 @@ def effective_sample_size(weights: NDArray[np.floating]) -> float:
 
 def bootstrap_pf_predict(
     particles: NDArray[np.floating],
-    f: Callable[[NDArray], NDArray],
-    Q_sample: Callable[[int, Optional[np.random.Generator]], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
+    Q_sample: Callable[[int, Optional[np.random.Generator]], NDArray[Any]],
     rng: Optional[np.random.Generator] = None,
 ) -> NDArray[np.floating]:
     """
@@ -242,7 +242,7 @@ def bootstrap_pf_update(
     particles: NDArray[np.floating],
     weights: NDArray[np.floating],
     z: ArrayLike,
-    likelihood_func: Callable[[NDArray, NDArray], float],
+    likelihood_func: Callable[[NDArray[Any], NDArray[Any]], float],
 ) -> Tuple[NDArray[np.floating], float]:
     """
     Bootstrap particle filter update step.
@@ -328,9 +328,9 @@ def bootstrap_pf_step(
     particles: NDArray[np.floating],
     weights: NDArray[np.floating],
     z: ArrayLike,
-    f: Callable[[NDArray], NDArray],
-    h: Callable[[NDArray], NDArray],
-    Q_sample: Callable[[int, Optional[np.random.Generator]], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
+    Q_sample: Callable[[int, Optional[np.random.Generator]], NDArray[Any]],
     R: ArrayLike,
     resample_threshold: float = 0.5,
     resample_method: str = "systematic",
@@ -378,7 +378,7 @@ def bootstrap_pf_step(
     particles_pred = bootstrap_pf_predict(particles, f, Q_sample, rng)
 
     # Update
-    def likelihood_func(z, x):
+    def likelihood_func(z: NDArray[Any], x: NDArray[Any]) -> Any:
         z_pred = h(x)
         return gaussian_likelihood(z, z_pred, R)
 
@@ -426,10 +426,10 @@ def particle_mean(
 
 @njit(cache=True)
 def _particle_covariance_core(
-    particles: np.ndarray,
-    weights: np.ndarray,
-    mean: np.ndarray,
-) -> np.ndarray:
+    particles: np.ndarray[Any, Any],
+    weights: np.ndarray[Any, Any],
+    mean: np.ndarray[Any, Any],
+) -> np.ndarray[Any, Any]:
    """JIT-compiled core for particle covariance computation."""
    N = particles.shape[0]
    n = particles.shape[1]