pyest 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyest/__init__.py +11 -0
- pyest/filters/GaussianMixtureFilter.py +87 -0
- pyest/filters/Gmkf.py +202 -0
- pyest/filters/Gmukf.py +168 -0
- pyest/filters/KalmanFilter.py +628 -0
- pyest/filters/UnscentedKalmanFilter.py +262 -0
- pyest/filters/__init__.py +8 -0
- pyest/filters/sigma_points.py +213 -0
- pyest/gm/__init__.py +4 -0
- pyest/gm/defaults.py +82 -0
- pyest/gm/gm.py +941 -0
- pyest/gm/main.py +2 -0
- pyest/gm/reduce.py +297 -0
- pyest/gm/split.py +1695 -0
- pyest/linalg.py +122 -0
- pyest/metrics.py +90 -0
- pyest/particle.py +60 -0
- pyest/sensors/FieldOfView.py +356 -0
- pyest/sensors/__init__.py +1 -0
- pyest/sensors/defaults.py +7 -0
- pyest/tens.py +338 -0
- pyest/utils.py +96 -0
- pyest-0.2.2.dist-info/METADATA +158 -0
- pyest-0.2.2.dist-info/RECORD +26 -0
- pyest-0.2.2.dist-info/WHEEL +4 -0
- pyest-0.2.2.dist-info/licenses/LICENSE.txt +7 -0
pyest/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
"""Top-level pyest package: resolve the installed distribution version and
import the public subpackages."""
from pkg_resources import get_distribution, DistributionNotFound

try:
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # Package metadata is unavailable (e.g. running from a source checkout).
    # Previously this branch was `pass`, which left __version__ undefined and
    # made `pyest.__version__` raise AttributeError. Define a placeholder so
    # the attribute always exists.
    __version__ = None

import pyest.gm
import pyest.filters
import pyest.sensors
|
|
pyest/filters/GaussianMixtureFilter.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from abc import ABC, abstractmethod
|
|
3
|
+
import pyest.gm as pygm
|
|
4
|
+
|
|
5
|
+
class GaussianMixturePredict(ABC):
    """Abstract base class for discrete Gaussian mixture filter prediction.

    Concrete subclasses implement :meth:`predict`, which propagates every
    component of a Gaussian mixture forward in time.
    """

    def __init__(self, *args, **kwargs):
        # cooperative multiple inheritance: forward all arguments up the MRO
        super().__init__(*args, **kwargs)

    @abstractmethod
    def predict(self, tv, pkm1, *args, **kwargs):
        """Propagate the Gaussian mixture ``pkm1`` forward in time over ``tv``."""
        pass
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class GaussianMixtureUpdate(ABC):
    """Abstract base class for discrete Gaussian mixture Kalman filter updates."""

    def __init__(self, *args, **kwargs):
        # cooperative multiple inheritance: forward all arguments up the MRO
        super().__init__(*args, **kwargs)

    @abstractmethod
    def cond_likelihood_prod(self, m, P, z, h_args=(), interm_vals=False):
        """Compute the product of the linear Gaussian likelihood function and
        another Gaussian pdf.

        Parameters
        ----------
        m : ndarray
            (nx x 1) prior mean
        P : ndarray
            (nx x nx) prior covariance matrix
        z : ndarray
            (nz x 1) measurement
        h_args : (optional) tuple
            deterministic parameters to be passed to measurement function
        interm_vals : (optional) Boolean
            if True, returns intermediate values from computation. False by default

        Returns
        -------
        mp : ndarray
            (nx x 1) posterior mean
        Pp : ndarray
            (nx x nx) posterior state error covariance
        q : float
            likelihood agreement, :math:`q = N(z; h(m), E[(z-h(m))(z-h(m))^T])`

        If interm_vals is true, additionally returns a dictionary containing:
        W : ndarray
            (nz x nz) innovations covariance
        C : ndarray
            (nx x nz) cross-covariance
        K : ndarray
            gain matrix
        zhat : ndarray
            predicted measurement
        """
        pass

    @abstractmethod
    def update(self, pkm, zk, unnormalized=False, h_args=(), *args, **kwargs):
        """Perform the measurement update of a Gaussian mixture.

        Parameters
        ----------
        pkm : GaussianMixture
            prior density at time tk
        zk : ndarray
            (nz,) measurement at time k
        unnormalized : optional
            if True, returns unnormalized distribution. False by default
        h_args : (optional) tuple
            deterministic parameters to be passed to measurement function

        Returns
        -------
        GaussianMixture
        """
        pass
|
pyest/filters/Gmkf.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import pyest.gm as pygm
|
|
3
|
+
from pyest.filters import GaussianMixturePredict, GaussianMixtureUpdate
|
|
4
|
+
from pyest.filters import KfdPredict, KfdUpdate
|
|
5
|
+
from pyest.utils import make_tuple
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class GmkfPredict(KfdPredict, GaussianMixturePredict):
    """ Discrete Gaussian Mixture Kalman Filter Prediction

    Parameters
    ----------
    F : ndarray or callable
        (nx,nx) state transition matrix of the form
        x_k = F(tkm, tk, *args) @ x. If provided an ndarray instead, F will
        automatically be recast as a callable.
    Q : ndarray or callable
        process noise covariance matrix of the form Q(tkm1, tkm). If provided
        an ndarray instead, Q will automatically be recast as a callable.
    M : (optional) ndarray or callable
        process noise mapping matrix of the form M(tkm1, tkm). If provided
        an ndarray instead, M will automatically be recast as a callable.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def predict(self, tv, pkm1, f_args=()):
        """ perform GMKF prediction step

        Parameters
        ----------
        tv : tuple
            start and stop times given as tv=(tkm1, tk)
        pkm1 : GaussianMixture
            posterior GM at time k-1
        f_args : (optional) tuple
            tuple of arguments to be additionally passed to the system function

        Returns
        -------
        GaussianMixture
            prior GM at time k
        """
        # component weights are unchanged by the time update
        w_pred = pkm1.w
        m_pred = np.empty_like(pkm1.m, dtype=float)
        P_pred = np.empty_like(pkm1.P, dtype=float)
        # run the single-component Kalman prediction on each mixand
        for idx, (_, m_prev, P_prev) in enumerate(pkm1):
            m_pred[idx], P_pred[idx] = super().predict(
                tv, m_prev, P_prev, f_args=f_args)

        return pygm.GaussianMixture(w_pred, m_pred, P_pred)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class GmkfUpdate(KfdUpdate, GaussianMixtureUpdate):
    """ Discrete Gaussian Mixture Kalman Filter Update

    Parameters
    ----------
    H : ndarray
        (nz x nx) measurement matrix
    R : ndarray
        (ny,ny) measurement noise covariance matrix
    L : (optional) ndarray
        (nz,ny) mapping matrix mapping measurement noise into
        measurement space
    cov_method : (optional) string
        method to use for covariance update. Valid options include 'general'
        (default), 'Joseph', 'standard', and 'KWK'.

    Written by Keith LeGrand, March 2019
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _likelihood_agreement(self, z, zhat, W):
        # N(z; zhat, W): agreement of the measurement with its prediction
        return pygm.eval_mvnpdf(z, zhat, W)

    def lin_gauss_cond_likelihood_prod(self, m, P, z, h_args=(), interm_vals=False):
        """ compute the product of the linear Gaussian likelihood function and
        another Gaussian pdf

        Parameters
        ----------
        m : ndarray
            (nx,) prior mean
        P : ndarray
            (nx,nx) prior covariance matrix
        z : ndarray
            (nz,) measurement
        h_args : (optional) tuple
            deterministic parameters to be passed to measurement function
        interm_vals : (optional) Boolean
            if True, returns intermediate values from computation. False by default

        Returns
        -------
        mp : ndarray
            (nx,) posterior mean
        Pp : ndarray
            (nx,nx) posterior state error covariance
        q : float
            likelihood agreement, :math:`q = N(z; Hm, HPH' + R)`

        If interm_vals is true, additionally returns a dictionary containing:
        W : ndarray
            (nz,nz) innovations covariance
        C : ndarray
            (nx,nz) cross-covariance
        K : ndarray
            gain matrix
        zhat : ndarray
            predicted measurement
        """
        # single-component Kalman update; keep intermediates for the weight
        mp, Pp, interm = super().update(m, P, z, interm_vals=True, h_args=h_args)
        q = self._likelihood_agreement(z, interm['zhat'], interm['W'])

        if interm_vals:
            return mp, Pp, q, interm
        return mp, Pp, q

    def cond_likelihood_prod(self, m, P, z, h_args=(), interm_vals=False):
        """ compute the product of the linear Gaussian likelihood function and
        another Gaussian pdf. See :meth:`lin_gauss_cond_likelihood_prod` for
        the full parameter and return documentation; this method delegates
        directly to it.
        """
        return self.lin_gauss_cond_likelihood_prod(
            m, P, z, h_args=h_args, interm_vals=interm_vals)

    def update(self, pkm, zk, unnormalized=False, h_args=(), *args, **kwargs):
        """ measurement-update of gm

        Parameters
        ----------
        pkm : GaussianMixture
            prior density at time tk
        zk : ndarray
            (nz,) measurement at time k
        unnormalized : optional
            if True, returns unnormalized distribution. False by default
        h_args : (optional) tuple
            deterministic parameters to be passed to measurement function

        Returns
        -------
        GaussianMixture
            posterior density at time tk
        """
        post_w = np.empty_like(pkm.w, dtype=float)
        post_m = np.empty_like(pkm.m, dtype=float)
        post_P = np.empty_like(pkm.P, dtype=float)
        # update each mixand independently; weight by likelihood agreement
        for j, (w_prior, m_prior, P_prior) in enumerate(pkm):
            post_m[j], post_P[j], q = self.lin_gauss_cond_likelihood_prod(
                m_prior, P_prior, zk, h_args=h_args)
            post_w[j] = w_prior * q

        if not unnormalized:
            post_w /= np.sum(post_w)

        return pygm.GaussianMixture(post_w, post_m, post_P)
|
|
190
|
+
|
|
191
|
+
def lin_gauss_likelihood_agreement(z, m, P, H, R, L=None, h_args=()):
    """ compute the likelihood agreement of a measurement z with the linear
    Gaussian likelihood function N(z; Hx, HPH' + LRL')

    Parameters
    ----------
    z : ndarray
        (nz,) measurement
    m : ndarray
        (nx,) prior mean
    P : ndarray
        (nx,nx) prior covariance matrix
    H : callable
        measurement matrix function of the form H(*h_args)
    R : ndarray
        measurement noise covariance matrix
    L : (optional) ndarray
        measurement noise mapping matrix. If None, R is used directly.
    h_args : (optional) tuple
        deterministic parameters to be passed to H

    Returns
    -------
    float
        likelihood agreement N(z; Hm, HPH' + LRL')
    """
    if L is None:
        LRLt = R
    else:
        # BUG FIX: ndarray transpose is .T; the original used the
        # nonexistent attribute .t, which raised AttributeError
        LRLt = L @ R @ L.T

    h_args = make_tuple(h_args)
    Hk = H(*h_args)
    return pygm.eval_mvnpdf(z, Hk @ m, Hk @ P @ Hk.T + LRLt)
|
pyest/filters/Gmukf.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
import pyest.gm as pygm
|
|
3
|
+
from pyest.filters import GaussianMixturePredict, GaussianMixtureUpdate
|
|
4
|
+
from pyest.filters.UnscentedKalmanFilter import UkfPredict, UkfUpdate
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class GmukfPredict(UkfPredict, GaussianMixturePredict):
    """ Gaussian mixture unscented Kalman filter prediction

    Parameters
    ----------
    f : callable
        system function of the form :math:`x_{k+1} = f(x_{k}, t_{k}, t_{k+1})`
    Q : ndarray or callable
        process noise covariance matrix of the form Q(tkm1, tkm). If provided
        an ndarray instead, Q will automatically be recast as a callable.
    M : (optional) ndarray or callable
        process noise mapping matrix of the form M(tkm1, tkm). If provided
        an ndarray instead, M will automatically be recast as a callable.
    sigma_pt_opts : (optional) SigmaPointOptions
        options controlling sigma-point placement (alpha, beta, kappa);
        alpha sets the spread of the sigma points, beta weights knowledge of
        higher-order moments (beta=2 is optimal for a Gaussian prior), and
        kappa >= 0 guarantees positive semi-definiteness of the covariance
        (kappa=0 is a good default).
    cov_type : (optional) str
        type of covariance form to use. Default is 'full'
    """
    def __init__(self, f, Q, M=None, sigma_pt_opts=None, cov_type='full'):
        super().__init__(f, Q, M=M, sigma_pt_opts=sigma_pt_opts, cov_type=cov_type)

    def predict(self, tv, pkm1, f_args=()):
        """ perform GMUKF prediction step

        Parameters
        ----------
        tv : tuple
            start and stop times given as tv=(tkm1, tk)
        pkm1 : GaussianMixture
            posterior GM at time k-1
        f_args : (optional) tuple
            tuple of arguments to be additionally passed to the system function

        Returns
        -------
        GaussianMixture
            prior GM at time k
        """
        # the time update leaves component weights untouched
        w_pred = pkm1.w
        m_pred = np.empty_like(pkm1.m, dtype=float)
        P_pred = np.empty_like(pkm1.P, dtype=float)
        # unscented prediction applied component-by-component
        for idx, (_, m_prev, P_prev) in enumerate(pkm1):
            m_pred[idx], P_pred[idx] = super().predict(
                tv, m_prev, P_prev, f_args=f_args)

        return pygm.GaussianMixture(w_pred, m_pred, P_pred)
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class GmukfUpdate(UkfUpdate, GaussianMixtureUpdate):
    """Gaussian mixture unscented Kalman filter measurement update."""

    def __init__(self, h, R, sigma_pt_opts=None, cov_type='full', p=None):
        """ Unscented Kalman filter update

        Parameters
        ----------
        h : callable
            measurement function of the form :math:`h(x, *h_args)`
        R : ndarray
            measurement noise covariance matrix
        sigma_pt_opts: (optional) SigmaPointOptions
            options for sigma point placement
        cov_type: (optional) str
            type of covariance form to use. Default is 'full'
        p : (optional) scalar
            underweighting factor. p=1 results in no underweighting. p-->0 results
            in no covariance update
        """
        super().__init__(h, R, sigma_pt_opts=sigma_pt_opts, cov_type=cov_type, p=p)

    def _meas_likelihood(self, z, zhat, W):
        # N(z; zhat, W): agreement of the measurement with its prediction
        return pygm.eval_mvnpdf(z, zhat, W)

    def cond_likelihood_prod(self, m, P, z, h_args=(), interm_vals=False):
        """ compute the product of the (non)linear Gaussian likelihood function
        and another Gaussian pdf

        Parameters
        ----------
        m : ndarray
            (nx,) prior mean
        P : ndarray
            (nx,nx) prior covariance matrix
        z : ndarray
            (nz,) measurement
        h_args : (optional) tuple
            deterministic parameters to be passed to measurement function
        interm_vals : (optional) Boolean
            if True, returns intermediate values from computation. False by default

        Returns
        -------
        mp : ndarray
            (nx,) posterior mean
        Pp : ndarray
            (nx,nx) posterior state error covariance
        q : float
            likelihood agreement,
            :math:`q = N(z; h(m), E{(z-\\hat{z})(z-\\hat{z})^T} + R)`

        If interm_vals is true, additionally returns a dictionary containing:
        W : ndarray
            (nz,nz) innovations covariance
        C : ndarray
            (nx,nz) cross-covariance
        K : ndarray
            gain matrix
        zhat : ndarray
            predicted measurement
        """
        # single-component unscented update; intermediates feed the weight
        mp, Pp, interm = super().update(m, P, z, interm_vals=True, h_args=h_args)
        q = self._meas_likelihood(z, interm['zhat'], interm['W'])

        if interm_vals:
            return mp, Pp, q, interm
        return mp, Pp, q

    def update(self, pkm, zk, unnormalized=False, h_args=(), R=None):
        """ perform unscented Kalman filter update

        Parameters
        ----------
        pkm : GaussianMixture
            prior density at time tk
        zk : ndarray
            (nz,) measurement at time k
        unnormalized : optional
            if True, returns unnormalized distribution. False by default
        h_args : (optional) tuple
            deterministic parameters to be passed to measurement function
        R : (optional) ndarray
            (ny,ny) measurement noise covariance matrix.
            NOTE(review): this argument is accepted but not referenced in the
            body, so a per-call R override currently has no effect — confirm
            intended behavior against UkfUpdate.

        Returns
        -------
        GaussianMixture
            posterior density at time tk
        """
        post_w = np.empty_like(pkm.w, dtype=float)
        post_m = np.empty_like(pkm.m, dtype=float)
        post_P = np.empty_like(pkm.P, dtype=float)
        # update each mixand independently; weight by likelihood agreement
        for j, (w_prior, m_prior, P_prior) in enumerate(pkm):
            post_m[j], post_P[j], q = self.cond_likelihood_prod(
                m_prior, P_prior, zk, h_args=h_args)
            post_w[j] = w_prior * q

        if not unnormalized:
            post_w /= np.sum(post_w)

        return pygm.GaussianMixture(post_w, post_m, post_P)
|