pygnss 2.1.2__cp314-cp314t-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygnss/__init__.py +1 -0
- pygnss/_c_ext/src/constants.c +36 -0
- pygnss/_c_ext/src/hatanaka.c +94 -0
- pygnss/_c_ext/src/helpers.c +17 -0
- pygnss/_c_ext/src/klobuchar.c +313 -0
- pygnss/_c_ext/src/mtable_init.c +50 -0
- pygnss/_c_ext.cpython-314t-darwin.so +0 -0
- pygnss/cl.py +148 -0
- pygnss/constants.py +4 -0
- pygnss/decorator.py +47 -0
- pygnss/file.py +36 -0
- pygnss/filter/__init__.py +77 -0
- pygnss/filter/ekf.py +80 -0
- pygnss/filter/models.py +74 -0
- pygnss/filter/particle.py +484 -0
- pygnss/filter/ukf.py +322 -0
- pygnss/geodetic.py +1177 -0
- pygnss/gnss/__init__.py +0 -0
- pygnss/gnss/edit.py +66 -0
- pygnss/gnss/observables.py +43 -0
- pygnss/gnss/residuals.py +43 -0
- pygnss/gnss/types.py +359 -0
- pygnss/hatanaka.py +70 -0
- pygnss/ionex.py +410 -0
- pygnss/iono/__init__.py +47 -0
- pygnss/iono/chapman.py +35 -0
- pygnss/iono/gim.py +131 -0
- pygnss/logger.py +70 -0
- pygnss/nequick.py +57 -0
- pygnss/orbit/__init__.py +0 -0
- pygnss/orbit/kepler.py +63 -0
- pygnss/orbit/tle.py +186 -0
- pygnss/parsers/rtklib/stats.py +166 -0
- pygnss/rinex.py +2161 -0
- pygnss/sinex.py +121 -0
- pygnss/stats.py +75 -0
- pygnss/tensorial.py +50 -0
- pygnss/time.py +350 -0
- pygnss-2.1.2.dist-info/METADATA +129 -0
- pygnss-2.1.2.dist-info/RECORD +44 -0
- pygnss-2.1.2.dist-info/WHEEL +6 -0
- pygnss-2.1.2.dist-info/entry_points.txt +8 -0
- pygnss-2.1.2.dist-info/licenses/LICENSE +21 -0
- pygnss-2.1.2.dist-info/top_level.txt +1 -0
pygnss/filter/ukf.py
ADDED
@@ -0,0 +1,322 @@
+import logging
+
+import numpy as np
+
+from . import StateHandler, Model, FilterInterface
+
+
+class Ukf(FilterInterface):
+    """
+    Class to implement the Unscented Kalman Filter (UKF)
+    """
+
+    def __init__(self,
+                 x: np.array,
+                 P: np.array,
+                 Q: np.array,
+                 model: Model,
+                 state_handler: StateHandler,
+                 alpha: float = 1.0,
+                 beta: float = 2.0,
+                 kappa: float = 0.0,
+                 logger: logging.Logger = logging):
+        """
+        Initialize the Ukf filter object
+
+        :param x: a-priori state (n)
+        :param P: a-priori error covariance (n x n)
+        :param Q: Process noise covariance (n x n)
+        :param model: Object of type Model that describes the underlying estimation model
+        :param state_handler: Object of type StateHandler used to process the estimated state
+        :param alpha: Primary scaling parameter
+        :param beta: Secondary scaling parameter (Gaussian assumption)
+        :param kappa: Tertiary scaling parameter
+        """
+
+        # Store the state, that will be propagated
+        self.x = x
+        self.P = P
+        self.Q = Q
+
+        self.model = model
+        self.state_handler = state_handler
+
+        self.logger = logger
+
+        self.L = len(x)  # Number of parameters
+
+        alpha2 = alpha * alpha
+
+        self.lambd = alpha2 * (self.L + kappa) - self.L
+
+        # Weights can be computed now, based on the setup input
+        n_sigma_points = 2 * self.L + 1
+        weight_k = 1.0 / (2.0 * (self.L + self.lambd))
+        self.w_m = np.ones((n_sigma_points,)) * weight_k
+        self.w_c = self.w_m.copy()
+
+        k = self.lambd / (self.lambd + self.L)
+
+        self.w_m[0] = k
+        self.w_c[0] = k + 1 - alpha2 + beta
+
+    def process(self, y_k: np.array, R: np.array, **kwargs):
+        """
+        Process an observation batch
+
+        :param y_k: object that contains the observations
+        :param R: matrix with the covariance of the measurement (i.e. measurement noise)
+        """
+
+        # Time update ----------------------------------------------------------
+
+        chi_p = self._generate_sigma_points()
+
+        # Obtain the propagated sigma points (chi_m, $\mathcal{X}_{k|k-1}^x$)
+        chi_m = np.array([self.model.propagate_state(sigma_point) for sigma_point in chi_p])
+
+        # From the propagated sigma points, obtain the averaged state ($\hat x_k^-$)
+        x_m = np.sum(chi_m * self.w_m[:, np.newaxis], axis=0)
+
+        # Compute the spread of the sigma points relative to the average
+        spread_chi_m = chi_m - x_m
+
+        # Covariance of the averaged propagated state ($\bf P_k^-$)
+        P_m = self.Q + _weighted_average_of_outer_product(spread_chi_m, spread_chi_m, self.w_c)
+
+        # Propagate the sigma points to the observation space (psi_m, $\mathcal{Y}_{k|k-1}$)
+        psi_m = np.array([self.model.to_observations(sigma_point, **kwargs).y_m for sigma_point in chi_m])
+        n_dim = len(psi_m.shape)
+        if n_dim == 1:
+            raise ValueError(f'Unexpected size for sigma point propagation, got [ {n_dim} ], '
+                             'expected >= 2. Check that the method model.to_observations returns '
+                             'an array of observations')
+
+        # Compute the average observation from the given sigma points
+        y_m = np.sum(psi_m * self.w_m[:, np.newaxis], axis=0)
+
+        # Measurement update ---------------------------------------------------
+        spread_psi_m = psi_m - y_m
+
+        P_yy = R + _weighted_average_of_outer_product(spread_psi_m, spread_psi_m, self.w_c)
+        P_xy = _weighted_average_of_outer_product(spread_chi_m, spread_psi_m, self.w_c)
+
+        # Compute state
+        self.x = x_m
+        self.P = P_m
+
+        try:
+            # Kalman gain ($\mathcal{K}$)
+            K = P_xy @ np.linalg.inv(P_yy)
+
+            self.x = self.x + K @ (y_k - y_m)
+            self.P = self.P - K @ P_yy @ K.T
+
+        except np.linalg.LinAlgError as e:
+            self.logger.warning(f'Unable to compute state, keeping previous one. Error: {e}')
+
+        # Compute postfit residuals
+        r = y_k - self.model.to_observations(self.x, **kwargs).y_m
+
+        self.state_handler.process_state(self.x, self.P, postfits=r, **kwargs)
+
+    def _generate_sigma_points(self) -> np.array:
+        """
+        Generate the sigma points
+
+        >>> x0 = np.array([0.2, 0.6])
+        >>> P0 = np.diag([0.8, 0.3])
+        >>> ukf_filter = Ukf(x0, P0, None, None, None)
+        >>> ukf_filter._generate_sigma_points()
+        array([[ 0.2       ,  0.6       ],
+               [ 1.46491106,  0.6       ],
+               [ 0.2       ,  1.37459667],
+               [-1.06491106,  0.6       ],
+               [ 0.2       , -0.17459667]])
+        """
+
+        # self.P = _make_positive_definite(self.P)
+
+        sqrt_P = np.linalg.cholesky(self.P)
+
+        offsets = np.sqrt(self.L + self.lambd) * sqrt_P
+
+        chi_p = np.vstack([self.x, self.x + offsets.T, self.x - offsets.T])
+
+        return chi_p
+
+
+class SquareRootUkf(object):
+    """
+    Class to implement the Square-Root Unscented Kalman Filter (UKF)
+    """
+
+    def __init__(self,
+                 x: np.array,
+                 P: np.array,
+                 model: Model,
+                 state_handler: StateHandler,
+                 alpha: float = 1.0,
+                 beta: float = 2.0,
+                 kappa: float = 0.0,
+                 logger: logging.Logger = logging):
+        """
+        Initialize the SquareRootUkf filter object
+
+        :param x: a-priori state
+        :param P: a-priori error covariance
+        :param model: Object of type Model that describes the underlying estimation model
+        :param state_handler: Object of type StateHandler used to process the estimated state
+        :param alpha: Primary scaling parameter
+        :param beta: Secondary scaling parameter (Gaussian assumption)
+        :param kappa: Tertiary scaling parameter
+        """
+
+        # Store the state, that will be propagated
+        self.x = x
+        self.S = np.linalg.cholesky(P)
+
+        self.model = model
+        self.state_handler = state_handler
+
+        self.logger = logger
+
+        self.L = len(x)  # Number of parameters
+
+        alpha2 = alpha * alpha
+
+        self.lambd = alpha2 * (self.L + kappa) - self.L
+
+        # Weights can be computed now, based on the setup input
+        n_sigma_points = 2 * self.L + 1
+        weight_k = 1.0 / (2.0 * (self.L + self.lambd))
+        self.w_m = np.ones((n_sigma_points,)) * weight_k
+        self.w_c = self.w_m.copy()
+
+        k = self.lambd / (self.lambd + self.L)
+
+        self.w_m[0] = k
+        self.w_c[0] = k + 1 - alpha2 + beta
+
+    def process(self, y_k):
+        """
+        Process an observation batch
+
+        :param y_k: object that contains the observations
+        """
+
+        # Time update ----------------------------------------------------------
+
+        chi_p = self._generate_sigma_points()
+
+        # Obtain the propagated sigma points (chi_m, $\mathcal{X}_{k|k-1}^x$)
+        chi_m = np.array([self.model.propagate_state(sigma_point) for sigma_point in chi_p])
+
+        # From the propagated sigma points, obtain the averaged state ($\hat x_k^-$)
+        x_m = np.sum(chi_m * self.w_m[:, np.newaxis], axis=0)
+
+        # Compute the spread of the sigma points relative to the average
+        spread_chi_m = chi_m - x_m
+
+        # Covariance of the averaged propagated state ($\bf P_k^-$)
+        P_m = _weighted_average_of_outer_product(spread_chi_m, spread_chi_m, self.w_c)
+
+        # Propagate the sigma points to the observation space (psi_m, $\mathcal{Y}_{k|k-1}$)
+        psi_m = np.array([self.model.to_observations(sigma_point) for sigma_point in chi_p])
+
+        # Compute the average observation from the given sigma points
+        y_m = np.sum(psi_m * self.w_m[:, np.newaxis], axis=0)
+
+        # Measurement update ---------------------------------------------------
+        spread_psi_m = psi_m - y_m
+
+        P_yy = _weighted_average_of_outer_product(spread_psi_m, spread_psi_m, self.w_c)
+        P_xy = _weighted_average_of_outer_product(spread_chi_m, spread_psi_m, self.w_c)
+
+        # Compute state
+        self.x = x_m
+        self.P = P_m
+
+        try:
+            # Kalman gain ($\mathcal{K}$)
+            K = P_xy @ np.linalg.inv(P_yy)
+
+            self.x = self.x + K @ (y_k - y_m)
+            self.P = self.P - K @ P_yy @ K.T
+
+            # # Ensure positive definite matrix, known issue in the standard UKF
+            # # https://stackoverflow.com/questions/67360472/negative-covariance-matrix-in-unscented-kalman-filter
+            # # Get the diagonal of the matrix
+            # diagonal = np.diag(self.P).copy()
+            # diagonal[diagonal < 0] = 0
+            # diagonal += 1.0e-5  # small jitter for regularization
+            # np.fill_diagonal(self.P, diagonal)
+
+        except np.linalg.LinAlgError as e:
+            self.logger.warning(f'Unable to compute state, keeping previous one. Error: {e}')
+
+        self.state_handler.process_state(self.x, self.P)
+
+    def _generate_sigma_points(self) -> np.array:
+
+        sqrt_P = np.linalg.cholesky(self.P)
+
+        chi_p = np.vstack([self.x, self.x + sqrt_P, self.x - sqrt_P])
+
+        return chi_p
+
+
+def _weighted_average_of_outer_product(a: np.array, b: np.array, weights: np.array) -> np.array:
+    """
+    Computes the weighted average of the outer products of two arrays of vectors
+
+    Given two arrays $a$ and $b$, this method implements
+
+    $$
+    P = \\sum_{i=0}^N w_i \\cdot \\left( a_i \\cdot b_i^T \\right)
+    $$
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> b = np.array([[5, 6], [7, 8]])
+    >>> weights = np.array([1, 2])
+    >>> _weighted_average_of_outer_product(a, b, weights)
+    array([[47, 54],
+           [66, 76]])
+    """
+
+    if a.shape[0] != b.shape[0]:
+        raise ValueError(f'Number of rows in input vectors differ: [ {a.shape[0]} ] != [ {b.shape[0]} ]')
+
+    elif a.shape[0] != len(weights):
+        raise ValueError(f'Incorrect size of the weights vector: [ {a.shape[0]} ] != [ {len(weights)} ]')
+
+    n_rows = a.shape[0]
+
+    products = [np.outer(a[i], b[i]) * weights[i] for i in range(n_rows)]
+
+    average = np.sum(products, axis=0)
+
+    return average
+
+
+def _make_positive_definite(matrix, epsilon=1e-6):
+    """
+    Makes a matrix positive definite by adding a small value to its diagonal.
+
+    Args:
+        matrix: The input matrix (NumPy array).
+        epsilon: A small positive value to add to the diagonal (default: 1e-6).
+
+    Returns:
+        A positive definite matrix.
+    """
+
+    eigenvalues, _ = np.linalg.eig(matrix)
+    min_eigenvalue = np.min(eigenvalues)
+
+    if min_eigenvalue < 0:
+        shift = -min_eigenvalue + epsilon
+        return matrix + shift * np.eye(matrix.shape[0])
+    else:
+        return matrix