nrl-tracker 0.21.4__py3-none-any.whl → 1.7.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. {nrl_tracker-0.21.4.dist-info → nrl_tracker-1.7.5.dist-info}/METADATA +57 -10
  2. nrl_tracker-1.7.5.dist-info/RECORD +165 -0
  3. pytcl/__init__.py +4 -3
  4. pytcl/assignment_algorithms/__init__.py +28 -0
  5. pytcl/assignment_algorithms/data_association.py +2 -7
  6. pytcl/assignment_algorithms/gating.py +10 -10
  7. pytcl/assignment_algorithms/jpda.py +40 -40
  8. pytcl/assignment_algorithms/nd_assignment.py +379 -0
  9. pytcl/assignment_algorithms/network_flow.py +371 -0
  10. pytcl/assignment_algorithms/three_dimensional/assignment.py +3 -3
  11. pytcl/astronomical/__init__.py +162 -8
  12. pytcl/astronomical/ephemerides.py +533 -0
  13. pytcl/astronomical/reference_frames.py +865 -56
  14. pytcl/astronomical/relativity.py +473 -0
  15. pytcl/astronomical/sgp4.py +710 -0
  16. pytcl/astronomical/special_orbits.py +532 -0
  17. pytcl/astronomical/tle.py +558 -0
  18. pytcl/atmosphere/__init__.py +45 -3
  19. pytcl/atmosphere/ionosphere.py +512 -0
  20. pytcl/atmosphere/nrlmsise00.py +809 -0
  21. pytcl/clustering/dbscan.py +2 -2
  22. pytcl/clustering/gaussian_mixture.py +3 -3
  23. pytcl/clustering/hierarchical.py +15 -15
  24. pytcl/clustering/kmeans.py +4 -4
  25. pytcl/containers/__init__.py +28 -21
  26. pytcl/containers/base.py +219 -0
  27. pytcl/containers/cluster_set.py +2 -1
  28. pytcl/containers/covertree.py +26 -29
  29. pytcl/containers/kd_tree.py +94 -29
  30. pytcl/containers/measurement_set.py +1 -9
  31. pytcl/containers/rtree.py +200 -1
  32. pytcl/containers/vptree.py +21 -28
  33. pytcl/coordinate_systems/conversions/geodetic.py +272 -5
  34. pytcl/coordinate_systems/jacobians/jacobians.py +2 -2
  35. pytcl/coordinate_systems/projections/__init__.py +4 -2
  36. pytcl/coordinate_systems/projections/projections.py +2 -2
  37. pytcl/coordinate_systems/rotations/rotations.py +10 -6
  38. pytcl/core/__init__.py +18 -0
  39. pytcl/core/validation.py +333 -2
  40. pytcl/dynamic_estimation/__init__.py +26 -0
  41. pytcl/dynamic_estimation/gaussian_sum_filter.py +434 -0
  42. pytcl/dynamic_estimation/imm.py +15 -18
  43. pytcl/dynamic_estimation/kalman/__init__.py +30 -0
  44. pytcl/dynamic_estimation/kalman/constrained.py +382 -0
  45. pytcl/dynamic_estimation/kalman/extended.py +9 -12
  46. pytcl/dynamic_estimation/kalman/h_infinity.py +613 -0
  47. pytcl/dynamic_estimation/kalman/square_root.py +60 -573
  48. pytcl/dynamic_estimation/kalman/sr_ukf.py +302 -0
  49. pytcl/dynamic_estimation/kalman/ud_filter.py +410 -0
  50. pytcl/dynamic_estimation/kalman/unscented.py +9 -10
  51. pytcl/dynamic_estimation/particle_filters/bootstrap.py +15 -15
  52. pytcl/dynamic_estimation/rbpf.py +589 -0
  53. pytcl/dynamic_estimation/smoothers.py +1 -5
  54. pytcl/dynamic_models/discrete_time/__init__.py +1 -5
  55. pytcl/dynamic_models/process_noise/__init__.py +1 -5
  56. pytcl/gravity/egm.py +13 -0
  57. pytcl/gravity/spherical_harmonics.py +98 -37
  58. pytcl/gravity/tides.py +6 -6
  59. pytcl/logging_config.py +328 -0
  60. pytcl/magnetism/__init__.py +10 -14
  61. pytcl/magnetism/emm.py +10 -3
  62. pytcl/magnetism/wmm.py +260 -23
  63. pytcl/mathematical_functions/combinatorics/combinatorics.py +5 -5
  64. pytcl/mathematical_functions/geometry/geometry.py +5 -5
  65. pytcl/mathematical_functions/interpolation/__init__.py +2 -2
  66. pytcl/mathematical_functions/numerical_integration/quadrature.py +6 -6
  67. pytcl/mathematical_functions/signal_processing/detection.py +24 -24
  68. pytcl/mathematical_functions/signal_processing/filters.py +14 -14
  69. pytcl/mathematical_functions/signal_processing/matched_filter.py +12 -12
  70. pytcl/mathematical_functions/special_functions/__init__.py +2 -2
  71. pytcl/mathematical_functions/special_functions/bessel.py +15 -3
  72. pytcl/mathematical_functions/special_functions/debye.py +136 -26
  73. pytcl/mathematical_functions/special_functions/error_functions.py +3 -1
  74. pytcl/mathematical_functions/special_functions/gamma_functions.py +4 -4
  75. pytcl/mathematical_functions/special_functions/hypergeometric.py +81 -15
  76. pytcl/mathematical_functions/transforms/fourier.py +8 -8
  77. pytcl/mathematical_functions/transforms/stft.py +12 -12
  78. pytcl/mathematical_functions/transforms/wavelets.py +9 -9
  79. pytcl/navigation/__init__.py +14 -10
  80. pytcl/navigation/geodesy.py +246 -160
  81. pytcl/navigation/great_circle.py +101 -19
  82. pytcl/navigation/ins.py +1 -5
  83. pytcl/plotting/coordinates.py +7 -7
  84. pytcl/plotting/tracks.py +2 -2
  85. pytcl/static_estimation/maximum_likelihood.py +16 -14
  86. pytcl/static_estimation/robust.py +5 -5
  87. pytcl/terrain/loaders.py +5 -5
  88. pytcl/trackers/__init__.py +3 -14
  89. pytcl/trackers/hypothesis.py +1 -1
  90. pytcl/trackers/mht.py +9 -9
  91. pytcl/trackers/multi_target.py +2 -5
  92. nrl_tracker-0.21.4.dist-info/RECORD +0 -148
  93. {nrl_tracker-0.21.4.dist-info → nrl_tracker-1.7.5.dist-info}/LICENSE +0 -0
  94. {nrl_tracker-0.21.4.dist-info → nrl_tracker-1.7.5.dist-info}/WHEEL +0 -0
  95. {nrl_tracker-0.21.4.dist-info → nrl_tracker-1.7.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,302 @@
+ """
+ Square-root Unscented Kalman Filter (SR-UKF).
+
+ The SR-UKF propagates the square root of the covariance matrix directly,
+ providing improved numerical stability for the Unscented Kalman Filter.
+ This is particularly important for nonlinear systems with high-dimensional
+ state spaces.
+
+ References
+ ----------
+ .. [1] R. van der Merwe and E. A. Wan, "The Square-Root Unscented Kalman
+        Filter for State and Parameter-Estimation," ICASSP 2001.
+ .. [2] S. J. Julier and J. K. Uhlmann, "Unscented Filtering and Nonlinear
+        Estimation," Proceedings of the IEEE, 2004.
+ """
+
+ from typing import Any, Callable
+
+ import numpy as np
+ import scipy.linalg
+ from numpy.typing import ArrayLike
+
+ from pytcl.dynamic_estimation.kalman.square_root import (
+     SRKalmanPrediction,
+     SRKalmanUpdate,
+     cholesky_update,
+ )
+
+
+ def sr_ukf_predict(
+     x: ArrayLike,
+     S: ArrayLike,
+     f: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
+     S_Q: ArrayLike,
+     alpha: float = 1e-3,
+     beta: float = 2.0,
+     kappa: float = 0.0,
+ ) -> SRKalmanPrediction:
+     """
+     Square-root Unscented Kalman Filter prediction step.
+
+     Parameters
+     ----------
+     x : array_like
+         Current state estimate, shape (n,).
+     S : array_like
+         Lower triangular Cholesky factor of covariance, shape (n, n).
+     f : callable
+         State transition function f(x) -> x_next.
+     S_Q : array_like
+         Cholesky factor of process noise covariance.
+     alpha : float, optional
+         Spread of sigma points around mean. Default 1e-3.
+     beta : float, optional
+         Prior knowledge about distribution. Default 2.0 (Gaussian).
+     kappa : float, optional
+         Secondary scaling parameter. Default 0.0.
+
+     Returns
+     -------
+     result : SRKalmanPrediction
+         Predicted state and Cholesky factor.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> def f(x):
+     ...     return np.array([x[0] + x[1], x[1]])
+     >>> x = np.array([1.0, 0.5])
+     >>> S = np.linalg.cholesky(np.eye(2) * 0.1)
+     >>> S_Q = np.linalg.cholesky(np.eye(2) * 0.01)
+     >>> pred = sr_ukf_predict(x, S, f, S_Q)
+
+     See Also
+     --------
+     sr_ukf_update : Measurement update step.
+     """
+     x = np.asarray(x, dtype=np.float64).flatten()
+     S = np.asarray(S, dtype=np.float64)
+     S_Q = np.asarray(S_Q, dtype=np.float64)
+     n = len(x)
+
+     # Sigma point parameters
+     lam = alpha**2 * (n + kappa) - n
+     gamma = np.sqrt(n + lam)
+
+     # Weights
+     W_m = np.zeros(2 * n + 1)
+     W_c = np.zeros(2 * n + 1)
+     W_m[0] = lam / (n + lam)
+     W_c[0] = lam / (n + lam) + (1 - alpha**2 + beta)
+     for i in range(1, 2 * n + 1):
+         W_m[i] = 1 / (2 * (n + lam))
+         W_c[i] = 1 / (2 * (n + lam))
+
+     # Generate sigma points
+     sigma_points = np.zeros((n, 2 * n + 1))
+     sigma_points[:, 0] = x
+     for i in range(n):
+         sigma_points[:, i + 1] = x + gamma * S[:, i]
+         sigma_points[:, n + i + 1] = x - gamma * S[:, i]
+
+     # Propagate sigma points
+     sigma_points_pred = np.zeros_like(sigma_points)
+     for i in range(2 * n + 1):
+         sigma_points_pred[:, i] = f(sigma_points[:, i])
+
+     # Predicted mean
+     x_pred = np.sum(W_m * sigma_points_pred, axis=1)
+
+     # Predicted covariance square root via QR
+     # Build matrix for QR: [sqrt(W_c[1]) * (X - x_mean), S_Q]
+     residuals = sigma_points_pred[:, 1:] - x_pred[:, np.newaxis]
+     sqrt_Wc = np.sqrt(np.abs(W_c[1:]))
+     weighted_residuals = residuals * sqrt_Wc
+
+     compound = np.hstack([weighted_residuals, S_Q]).T
+     _, R = np.linalg.qr(compound)
+     S_pred = R[:n, :n].T
+
+     # Handle negative weight for mean point
+     if W_c[0] < 0:
+         # Downdate for the mean point
+         v = sigma_points_pred[:, 0] - x_pred
+         try:
+             S_pred = cholesky_update(S_pred, np.sqrt(np.abs(W_c[0])) * v, sign=-1.0)
+         except ValueError:
+             # Downdate would make the factor indefinite; keep the QR-based factor
+             pass
+     else:
+         v = sigma_points_pred[:, 0] - x_pred
+         S_pred = cholesky_update(S_pred, np.sqrt(W_c[0]) * v, sign=1.0)
+
+     # Ensure lower triangular with positive diagonal
+     for i in range(n):
+         if S_pred[i, i] < 0:
+             S_pred[i:, i] = -S_pred[i:, i]
+
+     return SRKalmanPrediction(x=x_pred, S=S_pred)
+
+
+ def sr_ukf_update(
+     x: ArrayLike,
+     S: ArrayLike,
+     z: ArrayLike,
+     h: Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]],
+     S_R: ArrayLike,
+     alpha: float = 1e-3,
+     beta: float = 2.0,
+     kappa: float = 0.0,
+ ) -> SRKalmanUpdate:
+     """
+     Square-root Unscented Kalman Filter update step.
+
+     Parameters
+     ----------
+     x : array_like
+         Predicted state estimate, shape (n,).
+     S : array_like
+         Lower triangular Cholesky factor of covariance, shape (n, n).
+     z : array_like
+         Measurement, shape (m,).
+     h : callable
+         Measurement function h(x) -> z.
+     S_R : array_like
+         Cholesky factor of measurement noise covariance.
+     alpha : float, optional
+         Spread of sigma points around mean. Default 1e-3.
+     beta : float, optional
+         Prior knowledge about distribution. Default 2.0 (Gaussian).
+     kappa : float, optional
+         Secondary scaling parameter. Default 0.0.
+
+     Returns
+     -------
+     result : SRKalmanUpdate
+         Updated state and Cholesky factor.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> def h(x):
+     ...     return np.array([x[0]])  # Measure first state
+     >>> x = np.array([1.0, 0.5])
+     >>> S = np.linalg.cholesky(np.eye(2) * 0.1)
+     >>> z = np.array([1.1])
+     >>> S_R = np.linalg.cholesky(np.array([[0.05]]))
+     >>> upd = sr_ukf_update(x, S, z, h, S_R)
+
+     See Also
+     --------
+     sr_ukf_predict : Prediction step.
+     """
+     x = np.asarray(x, dtype=np.float64).flatten()
+     S = np.asarray(S, dtype=np.float64)
+     z = np.asarray(z, dtype=np.float64).flatten()
+     S_R = np.asarray(S_R, dtype=np.float64)
+     n = len(x)
+     m = len(z)
+
+     # Sigma point parameters
+     lam = alpha**2 * (n + kappa) - n
+     gamma = np.sqrt(n + lam)
+
+     # Weights
+     W_m = np.zeros(2 * n + 1)
+     W_c = np.zeros(2 * n + 1)
+     W_m[0] = lam / (n + lam)
+     W_c[0] = lam / (n + lam) + (1 - alpha**2 + beta)
+     for i in range(1, 2 * n + 1):
+         W_m[i] = 1 / (2 * (n + lam))
+         W_c[i] = 1 / (2 * (n + lam))
+
+     # Generate sigma points
+     sigma_points = np.zeros((n, 2 * n + 1))
+     sigma_points[:, 0] = x
+     for i in range(n):
+         sigma_points[:, i + 1] = x + gamma * S[:, i]
+         sigma_points[:, n + i + 1] = x - gamma * S[:, i]
+
+     # Propagate through measurement function
+     Z = np.zeros((m, 2 * n + 1))
+     for i in range(2 * n + 1):
+         Z[:, i] = h(sigma_points[:, i])
+
+     # Predicted measurement mean
+     z_pred = np.sum(W_m * Z, axis=1)
+
+     # Innovation
+     y = z - z_pred
+
+     # Innovation covariance square root via QR
+     residuals_z = Z[:, 1:] - z_pred[:, np.newaxis]
+     sqrt_Wc = np.sqrt(np.abs(W_c[1:]))
+     weighted_residuals_z = residuals_z * sqrt_Wc
+
+     compound_z = np.hstack([weighted_residuals_z, S_R]).T
+     _, R_z = np.linalg.qr(compound_z)
+     S_y = R_z[:m, :m].T
+
+     # Apply the mean-point weight when it is non-negative (a negative-weight downdate of S_y is skipped)
+     v_z = Z[:, 0] - z_pred
+     if W_c[0] >= 0:
+         S_y = cholesky_update(S_y, np.sqrt(W_c[0]) * v_z, sign=1.0)
+
+     for i in range(m):
+         if S_y[i, i] < 0:
+             S_y[i:, i] = -S_y[i:, i]
+
+     # Cross covariance
+     residuals_x = sigma_points[:, 1:] - x[:, np.newaxis]
+     P_xz = (
+         W_c[0] * np.outer(sigma_points[:, 0] - x, Z[:, 0] - z_pred)
+         + (residuals_x * W_c[1:]) @ (Z[:, 1:] - z_pred[:, np.newaxis]).T
+     )
+
+     # Kalman gain
+     K = scipy.linalg.solve_triangular(
+         S_y.T, scipy.linalg.solve_triangular(S_y, P_xz.T, lower=True), lower=False
+     ).T
+
+     # Updated state
+     x_upd = x + K @ y
+
+     # Updated covariance square root
+     S_upd = S.copy()
+     KS_y = K @ S_y
+     for j in range(m):
+         try:
+             S_upd = cholesky_update(S_upd, KS_y[:, j], sign=-1.0)
+         except ValueError:
+             # Fallback: compute directly
+             P = S_upd @ S_upd.T - np.outer(KS_y[:, j], KS_y[:, j])
+             P = (P + P.T) / 2
+             eigvals = np.linalg.eigvalsh(P)
+             if np.min(eigvals) < 0:
+                 P = P + (np.abs(np.min(eigvals)) + 1e-10) * np.eye(n)
+             S_upd = np.linalg.cholesky(P)
+
+     # Likelihood
+     det_S_y = np.prod(np.diag(S_y)) ** 2
+     if det_S_y > 0:
+         y_normalized = scipy.linalg.solve_triangular(S_y, y, lower=True)
+         mahal_sq = np.sum(y_normalized**2)
+         likelihood = np.exp(-0.5 * mahal_sq) / np.sqrt((2 * np.pi) ** m * det_S_y)
+     else:
+         likelihood = 0.0
+
+     return SRKalmanUpdate(
+         x=x_upd,
+         S=S_upd,
+         y=y,
+         S_y=S_y,
+         K=K,
+         likelihood=likelihood,
+     )
+
+
+ __all__ = [
+     "sr_ukf_predict",
+     "sr_ukf_update",
+ ]
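
The hunk above corresponds to the new file pytcl/dynamic_estimation/kalman/sr_ukf.py (item 48 in the file list, +302 lines). As an illustrative usage sketch only, not part of the diff: the two functions are meant to be chained in a predict/update loop, with the Cholesky factor S carried forward in place of a full covariance. The snippet assumes the import path shown in the file list and that SRKalmanPrediction and SRKalmanUpdate are named tuples exposing the x and S fields used in the return statements above.

import numpy as np

from pytcl.dynamic_estimation.kalman.sr_ukf import sr_ukf_predict, sr_ukf_update


def f(x):
    # Constant-velocity transition for a two-state [position, velocity] model
    return np.array([x[0] + x[1], x[1]])


def h(x):
    # Measure the position component only
    return np.array([x[0]])


x = np.array([0.0, 1.0])
S = np.linalg.cholesky(np.eye(2) * 0.5)      # Cholesky factor of the initial covariance
S_Q = np.linalg.cholesky(np.eye(2) * 0.01)   # Cholesky factor of the process noise
S_R = np.linalg.cholesky(np.array([[0.1]]))  # Cholesky factor of the measurement noise

for z in ([1.05], [2.10], [2.90]):
    pred = sr_ukf_predict(x, S, f, S_Q)
    upd = sr_ukf_update(pred.x, pred.S, np.array(z), h, S_R)
    x, S = upd.x, upd.S  # carry the state and its square-root covariance forward
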
@@ -0,0 +1,410 @@
+ """
+ U-D factorization Kalman filter (Bierman's method).
+
+ The U-D filter represents the covariance matrix as P = U @ D @ U.T where
+ U is unit upper triangular and D is diagonal. This provides excellent
+ numerical stability with minimal storage requirements.
+
+ References
+ ----------
+ .. [1] G. J. Bierman, "Factorization Methods for Discrete Sequential
+        Estimation," Academic Press, 1977.
+ .. [2] C. L. Thornton and G. J. Bierman, "Gram-Schmidt Algorithms for
+        Covariance Propagation," Int. J. Control, 1978.
+ """
+
+ from typing import NamedTuple
+
+ import numpy as np
+ import scipy.linalg
+ from numpy.typing import ArrayLike, NDArray
+
+
+ class UDState(NamedTuple):
+     """State of a U-D factorization filter.
+
+     The covariance is represented as P = U @ D @ U.T where U is
+     unit upper triangular and D is diagonal.
+
+     Attributes
+     ----------
+     x : ndarray
+         State estimate.
+     U : ndarray
+         Unit upper triangular factor.
+     D : ndarray
+         Diagonal elements (1D array).
+     """
+
+     x: NDArray[np.floating]
+     U: NDArray[np.floating]
+     D: NDArray[np.floating]
+
+
+ def ud_factorize(P: ArrayLike) -> tuple[NDArray[np.floating], NDArray[np.floating]]:
+     """
+     Compute U-D factorization of a symmetric positive definite matrix.
+
+     Decomposes P = U @ D @ U.T where U is unit upper triangular and D is diagonal.
+
+     Parameters
+     ----------
+     P : array_like
+         Symmetric positive definite matrix, shape (n, n).
+
+     Returns
+     -------
+     U : ndarray
+         Unit upper triangular matrix.
+     D : ndarray
+         Diagonal elements (1D array).
+
+     Notes
+     -----
+     The U-D factorization is equivalent to a modified Cholesky decomposition
+     and requires only n(n+1)/2 storage elements.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> P = np.array([[4.0, 2.0], [2.0, 3.0]])
+     >>> U, D = ud_factorize(P)
+     >>> np.allclose(U @ np.diag(D) @ U.T, P)
+     True
+     """
+     P = np.asarray(P, dtype=np.float64).copy()  # Make a copy to avoid modifying input
+     n = P.shape[0]
+
+     U = np.eye(n)
+     D = np.zeros(n)
+
+     for j in range(n - 1, -1, -1):
+         D[j] = P[j, j]
+         if D[j] > 0:
+             alpha = 1.0 / D[j]
+             for k in range(j):
+                 U[k, j] = P[k, j] * alpha
+             for i in range(j):
+                 for k in range(i + 1):
+                     P[k, i] = P[k, i] - U[k, j] * D[j] * U[i, j]
+
+     return U, D
+
+
+ def ud_reconstruct(U: ArrayLike, D: ArrayLike) -> NDArray[np.floating]:
+     """
+     Reconstruct covariance matrix from U-D factors.
+
+     Parameters
+     ----------
+     U : array_like
+         Unit upper triangular matrix.
+     D : array_like
+         Diagonal elements.
+
+     Returns
+     -------
+     P : ndarray
+         Covariance matrix P = U @ diag(D) @ U.T.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> U = np.array([[1.0, 0.5], [0.0, 1.0]])
+     >>> D = np.array([2.0, 1.0])
+     >>> P = ud_reconstruct(U, D)
+     >>> P
+     array([[2.5, 0.5],
+            [0.5, 1. ]])
+     """
+     U = np.asarray(U, dtype=np.float64)
+     D = np.asarray(D, dtype=np.float64)
+     return U @ np.diag(D) @ U.T
+
+
+ def ud_predict(
+     x: ArrayLike,
+     U: ArrayLike,
+     D: ArrayLike,
+     F: ArrayLike,
+     Q: ArrayLike,
+ ) -> tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]]:
+     """
+     U-D filter prediction step.
+
+     Parameters
+     ----------
+     x : array_like
+         Current state estimate, shape (n,).
+     U : array_like
+         Unit upper triangular factor, shape (n, n).
+     D : array_like
+         Diagonal elements, shape (n,).
+     F : array_like
+         State transition matrix, shape (n, n).
+     Q : array_like
+         Process noise covariance, shape (n, n).
+
+     Returns
+     -------
+     x_pred : ndarray
+         Predicted state.
+     U_pred : ndarray
+         Predicted unit upper triangular factor.
+     D_pred : ndarray
+         Predicted diagonal elements.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> x = np.array([1.0, 0.0])
+     >>> U = np.eye(2)
+     >>> D = np.array([0.1, 0.1])
+     >>> F = np.array([[1, 1], [0, 1]])
+     >>> Q = np.eye(2) * 0.01
+     >>> x_pred, U_pred, D_pred = ud_predict(x, U, D, F, Q)
+     """
+     x = np.asarray(x, dtype=np.float64).flatten()
+     U = np.asarray(U, dtype=np.float64)
+     D = np.asarray(D, dtype=np.float64)
+     F = np.asarray(F, dtype=np.float64)
+     Q = np.asarray(Q, dtype=np.float64)
+
+     # Predicted state
+     x_pred = F @ x
+
+     # Predicted covariance: P_pred = F @ P @ F.T + Q
+     P = ud_reconstruct(U, D)
+     P_pred = F @ P @ F.T + Q
+
+     # Ensure symmetry
+     P_pred = (P_pred + P_pred.T) / 2
+
+     # Re-factorize
+     U_pred, D_pred = ud_factorize(P_pred)
+
+     return x_pred, U_pred, D_pred
+
+
+ def ud_update_scalar(
+     x: ArrayLike,
+     U: ArrayLike,
+     D: ArrayLike,
+     z: float,
+     h: ArrayLike,
+     r: float,
+ ) -> tuple[NDArray[np.floating], NDArray[np.floating], NDArray[np.floating]]:
+     """
+     U-D filter scalar measurement update (Bierman's algorithm).
+
+     This is the most efficient form - for vector measurements,
+     process each component sequentially.
+
+     Parameters
+     ----------
+     x : array_like
+         Predicted state estimate, shape (n,).
+     U : array_like
+         Unit upper triangular factor, shape (n, n).
+     D : array_like
+         Diagonal elements, shape (n,).
+     z : float
+         Scalar measurement.
+     h : array_like
+         Measurement row vector, shape (n,).
+     r : float
+         Measurement noise variance.
+
+     Returns
+     -------
+     x_upd : ndarray
+         Updated state.
+     U_upd : ndarray
+         Updated unit upper triangular factor.
+     D_upd : ndarray
+         Updated diagonal elements.
+
+     Notes
+     -----
+     This implements Bierman's sequential scalar update algorithm which
+     is numerically stable and efficient for U-D filters.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> x = np.array([1.0, 0.5])
+     >>> U = np.eye(2)
+     >>> D = np.array([0.2, 0.1])
+     >>> z = 1.1
+     >>> h = np.array([1.0, 0.0])
+     >>> r = 0.1
+     >>> x_upd, U_upd, D_upd = ud_update_scalar(x, U, D, z, h, r)
+     """
+     x = np.asarray(x, dtype=np.float64).flatten()
+     U = np.asarray(U, dtype=np.float64).copy()
+     D = np.asarray(D, dtype=np.float64).copy()
+     h = np.asarray(h, dtype=np.float64).flatten()
+     n = len(x)
+
+     # f = U.T @ h
+     f = U.T @ h
+
+     # g = D * f (element-wise)
+     g = D * f
+
+     # Running innovation variance: alpha[0] = r, alpha[j + 1] = alpha[j] + f[j] * g[j]
+     alpha = np.zeros(n + 1)
+     alpha[0] = r
+
+     for j in range(n):
+         alpha[j + 1] = alpha[j] + f[j] * g[j]
+
+     # Innovation
+     y = z - h @ x
+
+     # Update D and U
+     D_upd = D.copy()
+     U_upd = U.copy()
+
+     for j in range(n):
+         D_upd[j] = D[j] * alpha[j] / alpha[j + 1]
+         if j > 0:
+             p_j = -f[j] / alpha[j]
+             for i in range(j):
+                 U_upd[i, j] = U[i, j] + g[i] * p_j
+                 g[i] = g[i] + g[j] * U[i, j]
+
+     # Kalman gain
+     K = g / alpha[n]
+
+     # Updated state
+     x_upd = x + K * y
+
+     return x_upd, U_upd, D_upd
+
+
+ def ud_update(
+     x: ArrayLike,
+     U: ArrayLike,
+     D: ArrayLike,
+     z: ArrayLike,
+     H: ArrayLike,
+     R: ArrayLike,
+ ) -> tuple[
+     NDArray[np.floating],
+     NDArray[np.floating],
+     NDArray[np.floating],
+     NDArray[np.floating],
+     float,
+ ]:
+     """
+     U-D filter vector measurement update.
+
+     Processes measurements sequentially using scalar updates.
+
+     Parameters
+     ----------
+     x : array_like
+         Predicted state estimate, shape (n,).
+     U : array_like
+         Unit upper triangular factor, shape (n, n).
+     D : array_like
+         Diagonal elements, shape (n,).
+     z : array_like
+         Measurement vector, shape (m,).
+     H : array_like
+         Measurement matrix, shape (m, n).
+     R : array_like
+         Measurement noise covariance, shape (m, m).
+         Should be diagonal for sequential processing.
+
+     Returns
+     -------
+     x_upd : ndarray
+         Updated state.
+     U_upd : ndarray
+         Updated unit upper triangular factor.
+     D_upd : ndarray
+         Updated diagonal elements.
+     y : ndarray
+         Innovation vector.
+     likelihood : float
+         Measurement likelihood.
+
+     Notes
+     -----
+     For correlated measurement noise (non-diagonal R), the measurements
+     are decorrelated first using a Cholesky decomposition.
+
+     Examples
+     --------
+     >>> import numpy as np
+     >>> x = np.array([1.0, 0.5])
+     >>> U = np.eye(2)
+     >>> D = np.array([0.2, 0.1])
+     >>> z = np.array([1.1])
+     >>> H = np.array([[1.0, 0.0]])
+     >>> R = np.array([[0.1]])
+     >>> x_upd, U_upd, D_upd, y, likelihood = ud_update(x, U, D, z, H, R)
+     """
+     x = np.asarray(x, dtype=np.float64).flatten()
+     U = np.asarray(U, dtype=np.float64)
+     D = np.asarray(D, dtype=np.float64)
+     z = np.asarray(z, dtype=np.float64).flatten()
+     H = np.asarray(H, dtype=np.float64)
+     R = np.asarray(R, dtype=np.float64)
+     m = len(z)
+
+     # Full innovation before update
+     y = z - H @ x
+
+     # Check if R is diagonal
+     is_diagonal = np.allclose(R, np.diag(np.diag(R)))
+
+     if is_diagonal:
+         # Sequential scalar updates
+         x_upd = x.copy()
+         U_upd = U.copy()
+         D_upd = D.copy()
+
+         for i in range(m):
+             x_upd, U_upd, D_upd = ud_update_scalar(
+                 x_upd, U_upd, D_upd, z[i], H[i, :], R[i, i]
+             )
+     else:
+         # Decorrelate measurements
+         S_R = np.linalg.cholesky(R)
+         z_dec = scipy.linalg.solve_triangular(S_R, z, lower=True)
+         H_dec = scipy.linalg.solve_triangular(S_R, H, lower=True)
+
+         # Sequential scalar updates with unit variance
+         x_upd = x.copy()
+         U_upd = U.copy()
+         D_upd = D.copy()
+
+         for i in range(m):
+             x_upd, U_upd, D_upd = ud_update_scalar(
+                 x_upd, U_upd, D_upd, z_dec[i], H_dec[i, :], 1.0
+             )
+
+     # Compute likelihood
+     P = ud_reconstruct(U, D)
+     S_innov = H @ P @ H.T + R
+     det_S = np.linalg.det(S_innov)
+     if det_S > 0:
+         mahal_sq = y @ np.linalg.solve(S_innov, y)
+         likelihood = np.exp(-0.5 * mahal_sq) / np.sqrt((2 * np.pi) ** m * det_S)
+     else:
+         likelihood = 0.0
+
+     return x_upd, U_upd, D_upd, y, likelihood
+
+
+ __all__ = [
+     "UDState",
+     "ud_factorize",
+     "ud_reconstruct",
+     "ud_predict",
+     "ud_update_scalar",
+     "ud_update",
+ ]
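
The hunk above corresponds to the new file pytcl/dynamic_estimation/kalman/ud_filter.py (item 49 in the file list, +410 lines). A matching usage sketch, again illustrative only and assuming the import path from the file list: the covariance is kept as the pair (U, D) between steps, with ud_factorize used once to seed the factors from an ordinary covariance matrix.

import numpy as np

from pytcl.dynamic_estimation.kalman.ud_filter import ud_factorize, ud_predict, ud_update

F = np.array([[1.0, 1.0], [0.0, 1.0]])  # constant-velocity transition
Q = np.eye(2) * 0.01                    # process noise covariance
H = np.array([[1.0, 0.0]])              # measure position only
R = np.array([[0.1]])                   # measurement noise variance

x = np.array([0.0, 1.0])
U, D = ud_factorize(np.eye(2) * 0.5)    # factor the initial covariance once

for z in ([1.05], [2.10], [2.90]):
    x, U, D = ud_predict(x, U, D, F, Q)
    x, U, D, y, likelihood = ud_update(x, U, D, np.array(z), H, R)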