nrl-tracker 1.5.0__py3-none-any.whl → 1.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {nrl_tracker-1.5.0.dist-info → nrl_tracker-1.6.0.dist-info}/METADATA +7 -6
- {nrl_tracker-1.5.0.dist-info → nrl_tracker-1.6.0.dist-info}/RECORD +12 -9
- pytcl/__init__.py +1 -1
- pytcl/astronomical/__init__.py +66 -0
- pytcl/astronomical/reference_frames.py +632 -1
- pytcl/astronomical/sgp4.py +710 -0
- pytcl/astronomical/tle.py +558 -0
- pytcl/dynamic_estimation/kalman/__init__.py +18 -0
- pytcl/dynamic_estimation/kalman/h_infinity.py +613 -0
- {nrl_tracker-1.5.0.dist-info → nrl_tracker-1.6.0.dist-info}/LICENSE +0 -0
- {nrl_tracker-1.5.0.dist-info → nrl_tracker-1.6.0.dist-info}/WHEEL +0 -0
- {nrl_tracker-1.5.0.dist-info → nrl_tracker-1.6.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,613 @@
|
|
|
1
|
+
"""
|
|
2
|
+
H-infinity filter implementation.
|
|
3
|
+
|
|
4
|
+
This module provides the H-infinity filter, a robust estimation approach
|
|
5
|
+
that provides bounded-error estimation in the presence of model uncertainty.
|
|
6
|
+
Unlike the Kalman filter which minimizes mean-squared error assuming
|
|
7
|
+
Gaussian noise, the H-infinity filter minimizes the worst-case estimation
|
|
8
|
+
error.
|
|
9
|
+
|
|
10
|
+
The H-infinity filter is particularly useful when:
|
|
11
|
+
- Process and measurement noise statistics are uncertain
|
|
12
|
+
- The system model contains unmodeled dynamics
|
|
13
|
+
- Robustness to worst-case disturbances is required
|
|
14
|
+
|
|
15
|
+
References
|
|
16
|
+
----------
|
|
17
|
+
.. [1] Simon, D., "Optimal State Estimation: Kalman, H∞, and Nonlinear
|
|
18
|
+
Approaches," Wiley, 2006.
|
|
19
|
+
.. [2] Shen, X. and Deng, L., "Game Theory Approach to Discrete H∞ Filter
|
|
20
|
+
Design," IEEE Trans. Signal Processing, 1997.
|
|
21
|
+
.. [3] Shaked, U. and Theodor, Y., "H∞-Optimal Estimation: A Tutorial,"
|
|
22
|
+
Proc. IEEE CDC, 1992.
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
from typing import NamedTuple, Optional
|
|
26
|
+
|
|
27
|
+
import numpy as np
|
|
28
|
+
from numpy.typing import ArrayLike, NDArray
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class HInfinityUpdate(NamedTuple):
    """Outputs of a single H-infinity measurement update.

    Bundles the posterior estimate together with the diagnostic
    quantities produced while computing it.

    Attributes
    ----------
    x : ndarray
        Updated state estimate.
    P : ndarray
        Updated state covariance (error bound matrix).
    y : ndarray
        Innovation (measurement residual).
    S : ndarray
        Innovation covariance.
    K : ndarray
        Filter gain.
    gamma : float
        Performance bound parameter used.
    feasible : bool
        Whether the solution satisfies the H-infinity constraint.
    """

    x: NDArray[np.floating]
    P: NDArray[np.floating]
    y: NDArray[np.floating]
    S: NDArray[np.floating]
    K: NDArray[np.floating]
    gamma: float
    feasible: bool
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class HInfinityPrediction(NamedTuple):
    """Outputs of a single H-infinity prediction (time-update) step.

    Attributes
    ----------
    x : ndarray
        Predicted state estimate.
    P : ndarray
        Predicted error bound matrix.
    """

    x: NDArray[np.floating]
    P: NDArray[np.floating]
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def hinf_predict(
    x: ArrayLike,
    P: ArrayLike,
    F: ArrayLike,
    Q: ArrayLike,
    B: Optional[ArrayLike] = None,
    u: Optional[ArrayLike] = None,
) -> HInfinityPrediction:
    """Propagate the H-infinity filter state one step forward in time.

    The time update is identical to the standard Kalman filter's:
    the state is pushed through the transition matrix (plus an optional
    control contribution) and the error bound is propagated as
    ``F @ P @ F.T + Q``.

    Parameters
    ----------
    x : array_like
        Current state estimate, shape (n,).
    P : array_like
        Current error bound matrix, shape (n, n).
    F : array_like
        State transition matrix, shape (n, n).
    Q : array_like
        Process noise covariance, shape (n, n).
    B : array_like, optional
        Control input matrix, shape (n, m). Only used when ``u`` is
        also supplied.
    u : array_like, optional
        Control input, shape (m,). Only used when ``B`` is also
        supplied.

    Returns
    -------
    result : HInfinityPrediction
        Named tuple holding the predicted state and error bound matrix.

    Examples
    --------
    >>> import numpy as np
    >>> pred = hinf_predict([0.0, 1.0], np.eye(2) * 0.1,
    ...                     [[1, 1], [0, 1]], np.eye(2) * 0.01)
    >>> pred.x
    array([1., 1.])

    See Also
    --------
    hinf_update : H-infinity measurement update step.
    """
    state = np.asarray(x, dtype=np.float64).flatten()
    bound = np.asarray(P, dtype=np.float64)
    transition = np.asarray(F, dtype=np.float64)
    process_noise = np.asarray(Q, dtype=np.float64)

    # Propagate the mean through the (linear) dynamics.
    state_pred = transition @ state

    # Deterministic control contribution, only when both B and u given.
    if B is not None and u is not None:
        ctrl_matrix = np.asarray(B, dtype=np.float64)
        ctrl_input = np.asarray(u, dtype=np.float64).flatten()
        state_pred = state_pred + ctrl_matrix @ ctrl_input

    # Propagate the error bound, then re-symmetrize to suppress
    # floating-point asymmetry from the matrix products.
    bound_pred = transition @ bound @ transition.T + process_noise
    bound_pred = (bound_pred + bound_pred.T) / 2

    return HInfinityPrediction(x=state_pred, P=bound_pred)
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def hinf_update(
    x: ArrayLike,
    P: ArrayLike,
    z: ArrayLike,
    H: ArrayLike,
    R: ArrayLike,
    gamma: float,
    L: Optional[ArrayLike] = None,
) -> HInfinityUpdate:
    """
    H-infinity filter measurement update step.

    Computes the updated state estimate that minimizes the worst-case
    estimation error bound. The performance level gamma determines
    the trade-off between robustness and estimation accuracy.

    The H-infinity filter modifies the Kalman update to account for
    worst-case disturbances on the estimation error:

        P_inv_mod = P^{-1} - gamma^{-2} * L' @ L + H' @ R^{-1} @ H
        P_new = P_inv_mod^{-1}
        K = P_new @ H' @ R^{-1}
        x_new = x + K @ (z - H @ x)

    where L is the matrix that weights the estimation error (typically
    the identity matrix or a subset selecting states of interest).

    Parameters
    ----------
    x : array_like
        Predicted state estimate, shape (n,).
    P : array_like
        Predicted error bound matrix, shape (n, n).
    z : array_like
        Measurement vector, shape (m,).
    H : array_like
        Measurement matrix, shape (m, n).
    R : array_like
        Measurement noise covariance, shape (m, m).
    gamma : float
        Performance bound parameter (gamma > 0). Smaller values provide
        more robustness but require the constraint to be feasible.
        As gamma -> infinity, the filter approaches the Kalman filter.
    L : array_like, optional
        Error weighting matrix, shape (p, n). Defines which linear
        combinations of states to bound. Default is identity (all states).

    Returns
    -------
    result : HInfinityUpdate
        Named tuple containing the updated state x, error bound P,
        innovation y, innovation covariance S, gain K, gamma, and a
        feasibility flag.

    Notes
    -----
    The H-infinity constraint is feasible if and only if:
        P^{-1} - gamma^{-2} * L' @ L + H' @ R^{-1} @ H > 0

    If the constraint is not feasible (returns feasible=False), the result
    uses a regularized solution and may not satisfy the performance bound.

    The parameter gamma should be chosen based on the desired robustness
    level. Typical values range from 1 to 100. Lower values provide more
    robustness but are more restrictive.

    Examples
    --------
    >>> import numpy as np
    >>> x = np.array([1.0, 1.0])
    >>> P = np.eye(2) * 0.1
    >>> z = np.array([1.1])
    >>> H = np.array([[1.0, 0.0]])
    >>> R = np.array([[0.01]])
    >>> result = hinf_update(x, P, z, H, R, gamma=10.0)
    >>> result.feasible
    True

    See Also
    --------
    hinf_predict : H-infinity prediction step.
    hinf_predict_update : Combined predict and update step.

    References
    ----------
    .. [1] Simon, D., "Optimal State Estimation," Chapter 6, Wiley, 2006.
    """
    x = np.asarray(x, dtype=np.float64).flatten()
    P = np.asarray(P, dtype=np.float64)
    z = np.asarray(z, dtype=np.float64).flatten()
    H = np.asarray(H, dtype=np.float64)
    R = np.asarray(R, dtype=np.float64)

    n = len(x)

    # Default L to identity (bound all states equally)
    if L is None:
        L = np.eye(n)
    else:
        L = np.asarray(L, dtype=np.float64)

    # Innovation
    y = z - H @ x

    # Innovation covariance (standard Kalman)
    S = H @ P @ H.T + R

    # H-infinity modification:
    # P_inv_mod = P^{-1} - gamma^{-2} * L' @ L + H' @ R^{-1} @ H
    try:
        P_inv = np.linalg.inv(P)
        R_inv = np.linalg.inv(R)
    except np.linalg.LinAlgError:
        # Fallback to pseudo-inverse for singular P or R
        P_inv = np.linalg.pinv(P)
        R_inv = np.linalg.pinv(R)

    gamma_sq_inv = 1.0 / (gamma * gamma)

    # Modified information matrix
    P_inv_mod = P_inv - gamma_sq_inv * (L.T @ L) + H.T @ R_inv @ H

    # Check feasibility: P_inv_mod must be positive definite.
    # BUG FIX: eigvals must stay defined even when eigvalsh fails,
    # because the infeasible branch below reads it; previously that
    # path raised NameError.
    eigvals = None
    try:
        eigvals = np.linalg.eigvalsh(P_inv_mod)
        feasible = bool(np.all(eigvals > 0))
    except np.linalg.LinAlgError:
        feasible = False

    if feasible:
        # Standard H-infinity update
        try:
            P_new = np.linalg.inv(P_inv_mod)
        except np.linalg.LinAlgError:
            P_new = np.linalg.pinv(P_inv_mod)
            feasible = False
    else:
        # Regularize: shift the spectrum just enough to make the matrix
        # positive definite. When the eigenvalue computation itself
        # failed (eigvals is None) fall back to a fixed small shift.
        if eigvals is None:
            reg = 1e-6
        else:
            reg = abs(min(0, float(np.min(eigvals)))) + 1e-6
        P_inv_mod_reg = P_inv_mod + reg * np.eye(n)
        try:
            P_new = np.linalg.inv(P_inv_mod_reg)
        except np.linalg.LinAlgError:
            P_new = np.linalg.pinv(P_inv_mod_reg)

    # Ensure symmetry
    P_new = (P_new + P_new.T) / 2

    # H-infinity gain
    K = P_new @ H.T @ R_inv

    # Updated state
    x_new = x + K @ y

    return HInfinityUpdate(
        x=x_new,
        P=P_new,
        y=y,
        S=S,
        K=K,
        gamma=gamma,
        feasible=feasible,
    )
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
def hinf_predict_update(
    x: ArrayLike,
    P: ArrayLike,
    z: ArrayLike,
    F: ArrayLike,
    Q: ArrayLike,
    H: ArrayLike,
    R: ArrayLike,
    gamma: float,
    B: Optional[ArrayLike] = None,
    u: Optional[ArrayLike] = None,
    L: Optional[ArrayLike] = None,
) -> HInfinityUpdate:
    """Run one full H-infinity filter cycle: predict, then update.

    Convenience wrapper equivalent to calling :func:`hinf_predict`
    followed by :func:`hinf_update` on its result.

    Parameters
    ----------
    x : array_like
        Current state estimate, shape (n,).
    P : array_like
        Current error bound matrix, shape (n, n).
    z : array_like
        Measurement vector, shape (m,).
    F : array_like
        State transition matrix, shape (n, n).
    Q : array_like
        Process noise covariance, shape (n, n).
    H : array_like
        Measurement matrix, shape (m, n).
    R : array_like
        Measurement noise covariance, shape (m, m).
    gamma : float
        Performance bound parameter (gamma > 0).
    B : array_like, optional
        Control input matrix, shape (n, m).
    u : array_like, optional
        Control input, shape (m,).
    L : array_like, optional
        Error weighting matrix, shape (p, n).

    Returns
    -------
    result : HInfinityUpdate
        Named tuple with updated state, covariance, and filter quantities.

    See Also
    --------
    hinf_predict : Prediction step only.
    hinf_update : Update step only.
    """
    predicted = hinf_predict(x, P, F, Q, B=B, u=u)
    return hinf_update(predicted.x, predicted.P, z, H, R, gamma, L=L)
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
def extended_hinf_update(
    x: ArrayLike,
    P: ArrayLike,
    z: ArrayLike,
    h: callable,
    H: ArrayLike,
    R: ArrayLike,
    gamma: float,
    L: Optional[ArrayLike] = None,
) -> HInfinityUpdate:
    """
    Extended H-infinity filter measurement update for nonlinear systems.

    Uses a linearized measurement model around the current estimate,
    similar to the extended Kalman filter approach.

    Parameters
    ----------
    x : array_like
        Predicted state estimate, shape (n,).
    P : array_like
        Predicted error bound matrix, shape (n, n).
    z : array_like
        Measurement vector, shape (m,).
    h : callable
        Nonlinear measurement function h(x) -> z_pred.
    H : array_like
        Measurement Jacobian dh/dx evaluated at x, shape (m, n).
    R : array_like
        Measurement noise covariance, shape (m, m).
    gamma : float
        Performance bound parameter (gamma > 0).
    L : array_like, optional
        Error weighting matrix, shape (p, n). Default is identity.

    Returns
    -------
    result : HInfinityUpdate
        Named tuple with updated state and covariance.

    Notes
    -----
    The innovation is computed using the nonlinear function:
        y = z - h(x)

    while the gain computation uses the linearized Jacobian H.

    If the H-infinity constraint is infeasible at this gamma, a
    regularized solution is returned with feasible=False.

    See Also
    --------
    hinf_update : Linear H-infinity update.
    """
    x = np.asarray(x, dtype=np.float64).flatten()
    P = np.asarray(P, dtype=np.float64)
    z = np.asarray(z, dtype=np.float64).flatten()
    H = np.asarray(H, dtype=np.float64)
    R = np.asarray(R, dtype=np.float64)

    n = len(x)

    # Default L to identity
    if L is None:
        L = np.eye(n)
    else:
        L = np.asarray(L, dtype=np.float64)

    # Nonlinear innovation
    z_pred = h(x)
    y = z - z_pred

    # Innovation covariance (uses the linearized Jacobian)
    S = H @ P @ H.T + R

    # H-infinity modification
    try:
        P_inv = np.linalg.inv(P)
        R_inv = np.linalg.inv(R)
    except np.linalg.LinAlgError:
        # Fallback to pseudo-inverse for singular P or R
        P_inv = np.linalg.pinv(P)
        R_inv = np.linalg.pinv(R)

    gamma_sq_inv = 1.0 / (gamma * gamma)
    P_inv_mod = P_inv - gamma_sq_inv * (L.T @ L) + H.T @ R_inv @ H

    # Check feasibility: P_inv_mod must be positive definite.
    # BUG FIX: eigvals must stay defined even when eigvalsh fails,
    # because the infeasible branch below reads it; previously that
    # path raised NameError.
    eigvals = None
    try:
        eigvals = np.linalg.eigvalsh(P_inv_mod)
        feasible = bool(np.all(eigvals > 0))
    except np.linalg.LinAlgError:
        feasible = False

    if feasible:
        try:
            P_new = np.linalg.inv(P_inv_mod)
        except np.linalg.LinAlgError:
            P_new = np.linalg.pinv(P_inv_mod)
            feasible = False
    else:
        # Regularize: shift the spectrum just enough to make the matrix
        # positive definite. When the eigenvalue computation itself
        # failed (eigvals is None) fall back to a fixed small shift.
        if eigvals is None:
            reg = 1e-6
        else:
            reg = abs(min(0, float(np.min(eigvals)))) + 1e-6
        P_inv_mod_reg = P_inv_mod + reg * np.eye(n)
        try:
            P_new = np.linalg.inv(P_inv_mod_reg)
        except np.linalg.LinAlgError:
            P_new = np.linalg.pinv(P_inv_mod_reg)

    # Ensure symmetry
    P_new = (P_new + P_new.T) / 2

    # H-infinity gain and state update
    K = P_new @ H.T @ R_inv
    x_new = x + K @ y

    return HInfinityUpdate(
        x=x_new,
        P=P_new,
        y=y,
        S=S,
        K=K,
        gamma=gamma,
        feasible=feasible,
    )
|
|
496
|
+
|
|
497
|
+
|
|
498
|
+
def find_min_gamma(
    P: ArrayLike,
    H: ArrayLike,
    R: ArrayLike,
    L: Optional[ArrayLike] = None,
    tol: float = 1e-6,
) -> float:
    """Find the minimum feasible gamma for H-infinity filtering.

    The H-infinity constraint requires the modified information matrix

        P^{-1} - gamma^{-2} * L' @ L + H' @ R^{-1} @ H

    to be positive definite. This routine locates, via bisection, the
    smallest gamma at which its minimum eigenvalue crosses zero.

    Parameters
    ----------
    P : array_like
        Predicted error bound matrix, shape (n, n).
    H : array_like
        Measurement matrix, shape (m, n).
    R : array_like
        Measurement noise covariance, shape (m, m).
    L : array_like, optional
        Error weighting matrix, shape (p, n). Default is identity.
    tol : float, optional
        Tolerance for the bisection. Default 1e-6.

    Returns
    -------
    gamma_min : float
        Minimum feasible gamma value. Returns ``inf`` when no finite
        gamma makes the constraint feasible, and ``0.0`` when the
        constraint is feasible for arbitrarily small gamma.

    Examples
    --------
    >>> import numpy as np
    >>> P = np.eye(2) * 0.1
    >>> H = np.array([[1.0, 0.0]])
    >>> R = np.array([[0.01]])
    >>> find_min_gamma(P, H, R) < 1.0  # Typical result
    True
    """
    P = np.asarray(P, dtype=np.float64)
    H = np.asarray(H, dtype=np.float64)
    R = np.asarray(R, dtype=np.float64)

    dim = P.shape[0]

    weight = np.eye(dim) if L is None else np.asarray(L, dtype=np.float64)

    # Invert P and R, falling back to pseudo-inverses if singular.
    try:
        P_inv = np.linalg.inv(P)
        R_inv = np.linalg.inv(R)
    except np.linalg.LinAlgError:
        P_inv = np.linalg.pinv(P)
        R_inv = np.linalg.pinv(R)

    # Gamma-independent part of the modified information matrix.
    info_base = P_inv + H.T @ R_inv @ H
    # Gamma-weighted penalty term.
    penalty = weight.T @ weight

    def smallest_eig(g: float) -> float:
        """Minimum eigenvalue of the modified information matrix at g."""
        scale = 1.0 / (g * g)
        return np.min(np.linalg.eigvalsh(info_base - scale * penalty))

    # Initial bracket for the bisection.
    lo = 0.01
    hi = 1000.0

    # Grow the upper end until it is feasible (or give up at infinity).
    while smallest_eig(hi) < 0:
        hi *= 2
        if hi > 1e10:
            return float("inf")

    # Shrink the lower end until it is infeasible (or conclude zero).
    while smallest_eig(lo) > tol:
        lo /= 2
        if lo < 1e-10:
            return 0.0

    # Bisect down to the requested tolerance.
    while hi - lo > tol:
        mid = (lo + hi) / 2
        if smallest_eig(mid) > 0:
            hi = mid
        else:
            lo = mid

    return hi
|
|
603
|
+
|
|
604
|
+
|
|
605
|
+
# Public API of this module: result containers, the linear/extended
# H-infinity predict/update steps, and the feasibility search helper.
__all__ = [
    "HInfinityUpdate",
    "HInfinityPrediction",
    "hinf_predict",
    "hinf_update",
    "hinf_predict_update",
    "extended_hinf_update",
    "find_min_gamma",
]
|
|
File without changes
|
|
File without changes
|
|
File without changes
|