nrl-tracker 1.7.0__py3-none-any.whl → 1.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.1.dist-info}/METADATA +3 -2
- {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.1.dist-info}/RECORD +75 -75
- pytcl/__init__.py +2 -2
- pytcl/assignment_algorithms/__init__.py +15 -15
- pytcl/assignment_algorithms/gating.py +10 -10
- pytcl/assignment_algorithms/jpda.py +40 -40
- pytcl/assignment_algorithms/nd_assignment.py +5 -4
- pytcl/assignment_algorithms/network_flow.py +18 -8
- pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
- pytcl/astronomical/__init__.py +6 -6
- pytcl/astronomical/ephemerides.py +14 -11
- pytcl/astronomical/reference_frames.py +8 -4
- pytcl/astronomical/relativity.py +6 -5
- pytcl/astronomical/special_orbits.py +9 -13
- pytcl/atmosphere/__init__.py +6 -6
- pytcl/atmosphere/nrlmsise00.py +153 -152
- pytcl/clustering/dbscan.py +2 -2
- pytcl/clustering/gaussian_mixture.py +3 -3
- pytcl/clustering/hierarchical.py +15 -15
- pytcl/clustering/kmeans.py +4 -4
- pytcl/containers/base.py +3 -3
- pytcl/containers/cluster_set.py +12 -2
- pytcl/containers/covertree.py +5 -3
- pytcl/containers/rtree.py +1 -1
- pytcl/containers/vptree.py +4 -2
- pytcl/coordinate_systems/conversions/geodetic.py +31 -7
- pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
- pytcl/coordinate_systems/projections/projections.py +2 -2
- pytcl/coordinate_systems/rotations/rotations.py +10 -6
- pytcl/core/validation.py +3 -3
- pytcl/dynamic_estimation/__init__.py +16 -16
- pytcl/dynamic_estimation/gaussian_sum_filter.py +20 -38
- pytcl/dynamic_estimation/imm.py +14 -14
- pytcl/dynamic_estimation/kalman/__init__.py +1 -1
- pytcl/dynamic_estimation/kalman/constrained.py +35 -23
- pytcl/dynamic_estimation/kalman/extended.py +8 -8
- pytcl/dynamic_estimation/kalman/h_infinity.py +2 -2
- pytcl/dynamic_estimation/kalman/square_root.py +8 -2
- pytcl/dynamic_estimation/kalman/sr_ukf.py +3 -3
- pytcl/dynamic_estimation/kalman/ud_filter.py +11 -5
- pytcl/dynamic_estimation/kalman/unscented.py +8 -6
- pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
- pytcl/dynamic_estimation/rbpf.py +36 -40
- pytcl/gravity/spherical_harmonics.py +3 -3
- pytcl/gravity/tides.py +6 -6
- pytcl/logging_config.py +3 -3
- pytcl/magnetism/emm.py +10 -3
- pytcl/magnetism/wmm.py +4 -4
- pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
- pytcl/mathematical_functions/geometry/geometry.py +5 -5
- pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
- pytcl/mathematical_functions/signal_processing/detection.py +24 -24
- pytcl/mathematical_functions/signal_processing/filters.py +14 -14
- pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
- pytcl/mathematical_functions/special_functions/bessel.py +15 -3
- pytcl/mathematical_functions/special_functions/debye.py +5 -1
- pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
- pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
- pytcl/mathematical_functions/special_functions/hypergeometric.py +6 -4
- pytcl/mathematical_functions/transforms/fourier.py +8 -8
- pytcl/mathematical_functions/transforms/stft.py +12 -12
- pytcl/mathematical_functions/transforms/wavelets.py +9 -9
- pytcl/navigation/geodesy.py +3 -3
- pytcl/navigation/great_circle.py +5 -5
- pytcl/plotting/coordinates.py +7 -7
- pytcl/plotting/tracks.py +2 -2
- pytcl/static_estimation/maximum_likelihood.py +16 -14
- pytcl/static_estimation/robust.py +5 -5
- pytcl/terrain/loaders.py +5 -5
- pytcl/trackers/hypothesis.py +1 -1
- pytcl/trackers/mht.py +9 -9
- pytcl/trackers/multi_target.py +1 -1
- {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.1.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.1.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.7.0.dist-info → nrl_tracker-1.7.1.dist-info}/top_level.txt +0 -0
pytcl/dynamic_estimation/imm.py
CHANGED
@@ -12,7 +12,7 @@ The IMM algorithm consists of four steps:
 4. Output combination
 """
 
-from typing import List, NamedTuple, Optional
+from typing import Any, List, NamedTuple, Optional
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -101,7 +101,7 @@ class IMMUpdate(NamedTuple):
 def compute_mixing_probabilities(
     mode_probs: ArrayLike,
     transition_matrix: ArrayLike,
-) -> tuple[NDArray, NDArray]:
+) -> tuple[NDArray[Any], NDArray[Any]]:
     """
     Compute mixing probabilities and predicted mode probabilities.
 
@@ -143,10 +143,10 @@ def compute_mixing_probabilities(
 
 
 def mix_states(
-    mode_states: List[NDArray],
-    mode_covs: List[NDArray],
-    mixing_probs: NDArray,
-) -> tuple[List[NDArray], List[NDArray]]:
+    mode_states: List[NDArray[Any]],
+    mode_covs: List[NDArray[Any]],
+    mixing_probs: NDArray[Any],
+) -> tuple[List[NDArray[Any]], List[NDArray[Any]]]:
     """
     Mix states and covariances for interaction step.
 
@@ -197,10 +197,10 @@ def mix_states(
 
 
 def combine_estimates(
-    mode_states: List[NDArray],
-    mode_covs: List[NDArray],
-    mode_probs: NDArray,
-) -> tuple[NDArray, NDArray]:
+    mode_states: List[NDArray[Any]],
+    mode_covs: List[NDArray[Any]],
+    mode_probs: NDArray[Any],
+) -> tuple[NDArray[Any], NDArray[Any]]:
     """
     Combine mode-conditioned estimates into overall estimate.
 
@@ -546,10 +546,10 @@ class IMMEstimator:
         self.mode_covs = [np.eye(state_dim) for _ in range(n_modes)]
 
         # Mode-specific models (must be set by user)
-        self.F_list: List[NDArray] = [np.eye(state_dim) for _ in range(n_modes)]
-        self.Q_list: List[NDArray] = [np.eye(state_dim) for _ in range(n_modes)]
-        self.H_list: List[NDArray] = []
-        self.R_list: List[NDArray] = []
+        self.F_list: List[NDArray[Any]] = [np.eye(state_dim) for _ in range(n_modes)]
+        self.Q_list: List[NDArray[Any]] = [np.eye(state_dim) for _ in range(n_modes)]
+        self.H_list: List[NDArray[Any]] = []
+        self.R_list: List[NDArray[Any]] = []
 
         # Combined estimates
         self.x = np.zeros(state_dim)
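The hunks above only retype the IMM interaction functions, so as a reader aid here is a minimal standalone NumPy sketch of the math behind compute_mixing_probabilities and mix_states. It is illustrative only, not the pytcl implementation; the two-mode transition matrix and toy states are made up.

# Standalone sketch of the IMM mixing step (not the pytcl code).
import numpy as np

mode_probs = np.array([0.7, 0.3])                 # mu_i
transition_matrix = np.array([[0.95, 0.05],
                              [0.10, 0.90]])      # p_ij = P(mode j | mode i)

# Predicted mode probabilities: c_j = sum_i p_ij * mu_i
c = transition_matrix.T @ mode_probs
# Mixing probabilities: mu_{i|j} = p_ij * mu_i / c_j
mixing_probs = (transition_matrix * mode_probs[:, None]) / c[None, :]

# Mix two mode-conditioned estimates (x_i, P_i) for the interaction step
mode_states = [np.array([0.0, 1.0]), np.array([0.2, 0.8])]
mode_covs = [np.eye(2), 2.0 * np.eye(2)]
mixed_states, mixed_covs = [], []
for j in range(2):
    x0 = sum(mixing_probs[i, j] * mode_states[i] for i in range(2))
    P0 = sum(
        mixing_probs[i, j]
        * (mode_covs[i] + np.outer(mode_states[i] - x0, mode_states[i] - x0))
        for i in range(2)
    )
    mixed_states.append(x0)
    mixed_covs.append(P0)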
pytcl/dynamic_estimation/kalman/constrained.py
CHANGED
@@ -13,21 +13,24 @@ References
     density function truncation. Journal of Guidance, Control, and Dynamics.
 """
 
-from typing import Callable, Optional
+from typing import Any, Callable, Optional
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
 
-from pytcl.dynamic_estimation.kalman.linear import KalmanPrediction, KalmanUpdate
 from pytcl.dynamic_estimation.kalman.extended import ekf_predict, ekf_update
+from pytcl.dynamic_estimation.kalman.linear import KalmanPrediction, KalmanUpdate
 
 
 class ConstraintFunction:
     """Base class for state constraints."""
 
-    def __init__(
-
-
+    def __init__(
+        self,
+        g: Callable[[NDArray[Any]], NDArray[Any]],
+        G: Optional[Callable[[NDArray[Any]], NDArray[Any]]] = None,
+        constraint_type: str = "inequality",
+    ):
         """
         Define a constraint: g(x) ≤ 0 (inequality) or g(x) = 0 (equality).
 
@@ -47,11 +50,11 @@ class ConstraintFunction:
         self.G = G
         self.constraint_type = constraint_type
 
-    def evaluate(self, x: NDArray) -> NDArray:
+    def evaluate(self, x: NDArray[Any]) -> NDArray[Any]:
         """Evaluate constraint at state x."""
         return np.atleast_1d(np.asarray(self.g(x), dtype=np.float64))
 
-    def jacobian(self, x: NDArray) -> NDArray:
+    def jacobian(self, x: NDArray[Any]) -> NDArray[Any]:
         """Compute constraint Jacobian at x."""
         if self.G is not None:
             return np.atleast_2d(np.asarray(self.G(x), dtype=np.float64))
@@ -69,10 +72,10 @@ class ConstraintFunction:
             J[:, i] = (g_plus - g_x) / eps
         return J
 
-    def is_satisfied(self, x: NDArray, tol: float = 1e-6) -> bool:
+    def is_satisfied(self, x: NDArray[Any], tol: float = 1e-6) -> bool:
         """Check if constraint is satisfied."""
         g_val = self.evaluate(x)
-        if self.constraint_type ==
+        if self.constraint_type == "inequality":
             return np.all(g_val <= tol)
         else:  # equality
             return np.allclose(g_val, 0, atol=tol)
@@ -91,11 +94,11 @@ class ConstrainedEKF:
         List of active constraints.
     """
 
-    def __init__(self):
+    def __init__(self) -> None:
         """Initialize Constrained EKF."""
-        self.constraints = []
+        self.constraints: list[ConstraintFunction] = []
 
-    def add_constraint(self, constraint: ConstraintFunction):
+    def add_constraint(self, constraint: ConstraintFunction) -> None:
         """
         Add a constraint to the filter.
 
@@ -110,7 +113,7 @@ class ConstrainedEKF:
         self,
         x: ArrayLike,
         P: ArrayLike,
-        f: Callable[[NDArray], NDArray],
+        f: Callable[[NDArray[Any]], NDArray[Any]],
         F: ArrayLike,
         Q: ArrayLike,
     ) -> KalmanPrediction:
@@ -145,7 +148,7 @@ class ConstrainedEKF:
         x: ArrayLike,
         P: ArrayLike,
         z: ArrayLike,
-        h: Callable[[NDArray], NDArray],
+        h: Callable[[NDArray[Any]], NDArray[Any]],
         H: ArrayLike,
         R: ArrayLike,
     ) -> KalmanUpdate:
@@ -184,15 +187,22 @@ class ConstrainedEKF:
         if self.constraints:
             x_upd, P_upd = self._project_onto_constraints(x_upd, P_upd)
 
-        return KalmanUpdate(
+        return KalmanUpdate(
+            x=x_upd,
+            P=P_upd,
+            y=result.y,
+            S=result.S,
+            K=result.K,
+            likelihood=result.likelihood,
+        )
 
     def _project_onto_constraints(
         self,
-        x: NDArray,
-        P: NDArray,
+        x: NDArray[Any],
+        P: NDArray[Any],
         max_iter: int = 10,
         tol: float = 1e-6,
-    ) -> tuple[NDArray, NDArray]:
+    ) -> tuple[NDArray[Any], NDArray[Any]]:
         """
         Project state and covariance onto constraint manifold.
 
@@ -222,7 +232,9 @@ class ConstrainedEKF:
         P_proj = P.copy()
 
         # Check which constraints are violated
-        violated = [
+        violated: list[ConstraintFunction] = [
+            c for c in self.constraints if not c.is_satisfied(x_proj)
+        ]
 
         if not violated:
             return x_proj, P_proj
@@ -236,7 +248,7 @@ class ConstrainedEKF:
             G = constraint.jacobian(x_proj)
 
             # Only process violated constraints
-            if constraint.constraint_type ==
+            if constraint.constraint_type == "inequality":
                 mask = g_val > tol
             else:
                 mask = np.abs(g_val) > tol
@@ -292,7 +304,7 @@ class ConstrainedEKF:
 def constrained_ekf_predict(
     x: ArrayLike,
     P: ArrayLike,
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     F: ArrayLike,
     Q: ArrayLike,
 ) -> KalmanPrediction:
@@ -325,10 +337,10 @@ def constrained_ekf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
     H: ArrayLike,
     R: ArrayLike,
-    constraints: Optional[list] = None,
+    constraints: Optional[list[ConstraintFunction]] = None,
 ) -> KalmanUpdate:
     """
     Convenience function for constrained EKF update.
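For context, here is a hedged usage sketch of the ConstraintFunction / ConstrainedEKF API exactly as its signatures appear in this diff. The import path is inferred from the file list, and the speed-limit constraint and numbers are hypothetical; treat the snippet as illustrative, not documented usage.

# Hedged sketch built only from the signatures shown in the diff above.
import numpy as np
from pytcl.dynamic_estimation.kalman.constrained import ConstrainedEKF, ConstraintFunction

# Inequality constraint g(x) <= 0: keep the second state component below 5.
speed_limit = ConstraintFunction(
    g=lambda x: np.array([x[1] - 5.0]),
    G=lambda x: np.array([[0.0, 1.0]]),   # analytic Jacobian (optional per the diff)
    constraint_type="inequality",
)

cekf = ConstrainedEKF()
cekf.add_constraint(speed_limit)

x = np.array([0.0, 6.0])
print(speed_limit.evaluate(x), speed_limit.is_satisfied(x))  # [1.] False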
pytcl/dynamic_estimation/kalman/extended.py
CHANGED
@@ -5,7 +5,7 @@ The EKF handles nonlinear dynamics and/or measurements by linearizing
 around the current state estimate using Jacobians.
 """
 
-from typing import Callable
+from typing import Any, Callable
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -16,7 +16,7 @@ from pytcl.dynamic_estimation.kalman.linear import KalmanPrediction, KalmanUpdat
 def ekf_predict(
     x: ArrayLike,
     P: ArrayLike,
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     F: ArrayLike,
     Q: ArrayLike,
 ) -> KalmanPrediction:
@@ -86,7 +86,7 @@ def ekf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
     H: ArrayLike,
     R: ArrayLike,
 ) -> KalmanUpdate:
@@ -183,7 +183,7 @@ def ekf_update(
 
 
 def numerical_jacobian(
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     x: ArrayLike,
     dx: float = 1e-7,
 ) -> NDArray[np.floating]:
@@ -239,7 +239,7 @@ def numerical_jacobian(
 def ekf_predict_auto(
     x: ArrayLike,
     P: ArrayLike,
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     Q: ArrayLike,
     dx: float = 1e-7,
 ) -> KalmanPrediction:
@@ -273,7 +273,7 @@ def ekf_update_auto(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
     R: ArrayLike,
     dx: float = 1e-7,
 ) -> KalmanUpdate:
@@ -309,8 +309,8 @@ def iterated_ekf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
-    H_func: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
+    H_func: Callable[[NDArray[Any]], NDArray[Any]],
     R: ArrayLike,
     max_iter: int = 10,
     tol: float = 1e-6,
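The retyped numerical_jacobian signature above boils down to a forward-difference Jacobian; a standalone sketch of that computation follows. This is plain NumPy, not the pytcl routine, and the range-bearing measurement example is hypothetical.

# Standalone forward-difference Jacobian sketch (illustrative only).
from typing import Any, Callable

import numpy as np
from numpy.typing import NDArray

def finite_difference_jacobian(
    f: Callable[[NDArray[Any]], NDArray[Any]],
    x: NDArray[Any],
    dx: float = 1e-7,
) -> NDArray[np.floating]:
    x = np.asarray(x, dtype=np.float64)
    fx = np.asarray(f(x), dtype=np.float64)
    J = np.zeros((fx.size, x.size))
    for i in range(x.size):
        x_pert = x.copy()
        x_pert[i] += dx                       # perturb one component at a time
        J[:, i] = (np.asarray(f(x_pert), dtype=np.float64) - fx) / dx
    return J

# Example: range-bearing measurement h(x) = [sqrt(x0^2 + x1^2), atan2(x1, x0)]
h = lambda x: np.array([np.hypot(x[0], x[1]), np.arctan2(x[1], x[0])])
print(finite_difference_jacobian(h, np.array([3.0, 4.0])))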
pytcl/dynamic_estimation/kalman/h_infinity.py
CHANGED
@@ -22,7 +22,7 @@ References
     Proc. IEEE CDC, 1992.
 """
 
-from typing import Callable, NamedTuple, Optional
+from typing import Any, Callable, NamedTuple, Optional
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -379,7 +379,7 @@ def extended_hinf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable,
+    h: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
     H: ArrayLike,
     R: ArrayLike,
     gamma: float,
pytcl/dynamic_estimation/kalman/square_root.py
CHANGED
@@ -77,7 +77,9 @@ class SRKalmanUpdate(NamedTuple):
     likelihood: float
 
 
-def cholesky_update(
+def cholesky_update(
+    S: NDArray[np.floating], v: NDArray[np.floating], sign: float = 1.0
+) -> NDArray[np.floating]:
     """
     Rank-1 Cholesky update/downdate.
 
@@ -148,7 +150,11 @@ def cholesky_update(S: NDArray, v: NDArray, sign: float = 1.0) -> NDArray:
     return S
 
 
-def qr_update(
+def qr_update(
+    S_x: NDArray[np.floating],
+    S_noise: NDArray[np.floating],
+    F: Optional[NDArray[np.floating]] = None,
+) -> NDArray[np.floating]:
     """
     QR-based covariance square root update.
 
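What the newly multi-line cholesky_update signature computes, sketched by brute force: a triangular factor of P + sign * v v^T. This reference version simply refactorizes instead of performing the in-place O(n^2) update, and it assumes a lower-triangular factor convention, which may differ from pytcl's; it only illustrates the semantics.

# Reference semantics of a rank-1 Cholesky update/downdate (illustrative only).
import numpy as np

def cholesky_update_reference(S, v, sign=1.0):
    # Assumes S is a lower-triangular factor with P = S @ S.T
    P = S @ S.T + sign * np.outer(v, v)
    return np.linalg.cholesky(P)

S = np.linalg.cholesky(np.diag([4.0, 9.0]))
v = np.array([0.5, -0.2])
S_up = cholesky_update_reference(S, v, sign=1.0)
# S_up @ S_up.T equals S @ S.T + v v^T up to floating-point error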
pytcl/dynamic_estimation/kalman/sr_ukf.py
CHANGED
@@ -14,7 +14,7 @@ References
     Estimation," Proceedings of the IEEE, 2004.
 """
 
-from typing import Callable
+from typing import Any, Callable
 
 import numpy as np
 import scipy.linalg
@@ -30,7 +30,7 @@ from pytcl.dynamic_estimation.kalman.square_root import (
 def sr_ukf_predict(
     x: ArrayLike,
     S: ArrayLike,
-    f: Callable,
+    f: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
     S_Q: ArrayLike,
     alpha: float = 1e-3,
     beta: float = 2.0,
@@ -143,7 +143,7 @@ def sr_ukf_update(
     x: ArrayLike,
     S: ArrayLike,
     z: ArrayLike,
-    h: Callable,
+    h: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
     S_R: ArrayLike,
     alpha: float = 1e-3,
     beta: float = 2.0,
pytcl/dynamic_estimation/kalman/ud_filter.py
CHANGED
@@ -41,7 +41,7 @@ class UDState(NamedTuple):
     D: NDArray[np.floating]
 
 
-def ud_factorize(P: ArrayLike) -> tuple[NDArray, NDArray]:
+def ud_factorize(P: ArrayLike) -> tuple[NDArray[np.floating], NDArray[np.floating]]:
     """
     Compute U-D factorization of a symmetric positive definite matrix.
 
@@ -91,7 +91,7 @@ def ud_factorize(P: ArrayLike) -> tuple[NDArray, NDArray]:
     return U, D
 
 
-def ud_reconstruct(U: ArrayLike, D: ArrayLike) -> NDArray:
+def ud_reconstruct(U: ArrayLike, D: ArrayLike) -> NDArray[np.floating]:
     """
     Reconstruct covariance matrix from U-D factors.
 
@@ -128,7 +128,7 @@ def ud_predict(
     D: ArrayLike,
     F: ArrayLike,
     Q: ArrayLike,
-) -> tuple[NDArray, NDArray, NDArray]:
+) -> tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]]:
     """
     U-D filter prediction step.
 
@@ -193,7 +193,7 @@ def ud_update_scalar(
     z: float,
     h: ArrayLike,
     r: float,
-) -> tuple[NDArray, NDArray, NDArray]:
+) -> tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]]:
     """
     U-D filter scalar measurement update (Bierman's algorithm).
 
@@ -290,7 +290,13 @@ def ud_update(
     z: ArrayLike,
     H: ArrayLike,
     R: ArrayLike,
-) -> tuple[
+) -> tuple[
+    NDArray[np.floating],
+    NDArray[np.floating],
+    NDArray[np.floating],
+    NDArray[np.floating],
+    float,
+]:
     """
     U-D filter vector measurement update.
 
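As a reference for the retyped ud_factorize / ud_reconstruct pair, here is a standalone NumPy sketch of the U-D factorization P = U D U^T with unit upper-triangular U and diagonal D. It is illustrative only, not the pytcl code.

# Standalone U-D factorization sketch (illustrative only).
import numpy as np

def ud_factorize_reference(P):
    P = np.array(P, dtype=float)
    n = P.shape[0]
    U = np.eye(n)
    d = np.zeros(n)
    for j in range(n - 1, -1, -1):                      # work from the last column back
        d[j] = P[j, j] - np.sum(d[j + 1:] * U[j, j + 1:] ** 2)
        for i in range(j):
            U[i, j] = (P[i, j] - np.sum(d[j + 1:] * U[i, j + 1:] * U[j, j + 1:])) / d[j]
    return U, np.diag(d)

P = np.array([[4.0, 1.0], [1.0, 3.0]])
U, D = ud_factorize_reference(P)
assert np.allclose(U @ D @ U.T, P)                      # reconstruction check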
pytcl/dynamic_estimation/kalman/unscented.py
CHANGED
@@ -5,7 +5,7 @@ The UKF uses the unscented transform to propagate the mean and covariance
 through nonlinear functions without requiring Jacobian computation.
 """
 
-from typing import Callable, NamedTuple, Optional, Tuple
+from typing import Any, Callable, NamedTuple, Optional, Tuple
 
 import numpy as np
 from numpy.typing import ArrayLike, NDArray
@@ -226,7 +226,7 @@ def unscented_transform(
 def ukf_predict(
     x: ArrayLike,
     P: ArrayLike,
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     Q: ArrayLike,
     alpha: float = 1e-3,
     beta: float = 2.0,
@@ -292,7 +292,7 @@ def ukf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
     R: ArrayLike,
     alpha: float = 1e-3,
     beta: float = 2.0,
@@ -382,7 +382,9 @@ def ukf_update(
     )
 
 
-def ckf_spherical_cubature_points(
+def ckf_spherical_cubature_points(
+    n: int,
+) -> tuple[NDArray[np.floating], NDArray[np.floating]]:
     """
     Generate cubature points for Cubature Kalman Filter.
 
@@ -418,7 +420,7 @@ def ckf_spherical_cubature_points(n: int) -> Tuple[NDArray, NDArray]:
 def ckf_predict(
     x: ArrayLike,
     P: ArrayLike,
-    f: Callable[[NDArray], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
     Q: ArrayLike,
 ) -> KalmanPrediction:
     """
@@ -487,7 +489,7 @@ def ckf_update(
     x: ArrayLike,
     P: ArrayLike,
     z: ArrayLike,
-    h: Callable[[NDArray], NDArray],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
     R: ArrayLike,
 ) -> KalmanUpdate:
     """
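The new explicit signature of ckf_spherical_cubature_points corresponds to the standard spherical cubature rule: 2n unit-direction points scaled by sqrt(n), each with weight 1/(2n). A minimal sketch of that point set, in plain NumPy and illustrative only:

# Standalone spherical cubature point sketch (not the pytcl implementation).
import numpy as np

def spherical_cubature_points(n: int):
    eye = np.eye(n)
    points = np.sqrt(n) * np.vstack([eye, -eye])        # shape (2n, n)
    weights = np.full(2 * n, 1.0 / (2 * n))
    return points, weights

pts, w = spherical_cubature_points(2)
# Propagating x + S @ pts[i] (with P = S S^T) through the dynamics and taking the
# weighted mean and covariance gives the CKF analogue of the unscented transform,
# without the alpha/beta tuning parameters of the UKF.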
pytcl/dynamic_estimation/particle_filters/bootstrap.py
CHANGED
@@ -5,7 +5,7 @@ This module provides particle filtering algorithms for nonlinear/non-Gaussian
 state estimation.
 """
 
-from typing import Callable, NamedTuple, Optional, Tuple
+from typing import Any, Callable, NamedTuple, Optional, Tuple
 
 import numpy as np
 from numba import njit
@@ -102,9 +102,9 @@ def resample_systematic(
 
 @njit(cache=True)
 def _resample_residual_deterministic(
-    particles: np.ndarray,
-    floor_Nw: np.ndarray,
-) -> Tuple[np.ndarray, int]:
+    particles: np.ndarray[Any, Any],
+    floor_Nw: np.ndarray[Any, Any],
+) -> Tuple[np.ndarray[Any, Any], int]:
     """JIT-compiled deterministic copy portion of residual resampling."""
     N = particles.shape[0]
     n = particles.shape[1]
@@ -196,8 +196,8 @@ def effective_sample_size(weights: NDArray[np.floating]) -> float:
 
 def bootstrap_pf_predict(
     particles: NDArray[np.floating],
-    f: Callable[[NDArray], NDArray],
-    Q_sample: Callable[[int, Optional[np.random.Generator]], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
+    Q_sample: Callable[[int, Optional[np.random.Generator]], NDArray[Any]],
     rng: Optional[np.random.Generator] = None,
 ) -> NDArray[np.floating]:
     """
@@ -242,7 +242,7 @@ def bootstrap_pf_update(
     particles: NDArray[np.floating],
     weights: NDArray[np.floating],
     z: ArrayLike,
-    likelihood_func: Callable[[NDArray, NDArray], float],
+    likelihood_func: Callable[[NDArray[Any], NDArray[Any]], float],
 ) -> Tuple[NDArray[np.floating], float]:
     """
     Bootstrap particle filter update step.
@@ -328,9 +328,9 @@ def bootstrap_pf_step(
     particles: NDArray[np.floating],
     weights: NDArray[np.floating],
     z: ArrayLike,
-    f: Callable[[NDArray], NDArray],
-    h: Callable[[NDArray], NDArray],
-    Q_sample: Callable[[int, Optional[np.random.Generator]], NDArray],
+    f: Callable[[NDArray[Any]], NDArray[Any]],
+    h: Callable[[NDArray[Any]], NDArray[Any]],
+    Q_sample: Callable[[int, Optional[np.random.Generator]], NDArray[Any]],
     R: ArrayLike,
     resample_threshold: float = 0.5,
     resample_method: str = "systematic",
@@ -378,7 +378,7 @@ def bootstrap_pf_step(
     particles_pred = bootstrap_pf_predict(particles, f, Q_sample, rng)
 
     # Update
-    def likelihood_func(z, x):
+    def likelihood_func(z: NDArray[Any], x: NDArray[Any]) -> Any:
         z_pred = h(x)
         return gaussian_likelihood(z, z_pred, R)
 
@@ -426,10 +426,10 @@ def particle_mean(
 
 @njit(cache=True)
 def _particle_covariance_core(
-    particles: np.ndarray,
-    weights: np.ndarray,
-    mean: np.ndarray,
-) -> np.ndarray:
+    particles: np.ndarray[Any, Any],
+    weights: np.ndarray[Any, Any],
+    mean: np.ndarray[Any, Any],
+) -> np.ndarray[Any, Any]:
     """JIT-compiled core for particle covariance computation."""
    N = particles.shape[0]
    n = particles.shape[1]