py-neuromodulation 0.0.5__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. py_neuromodulation/__init__.py +16 -10
  2. py_neuromodulation/{nm_RMAP.py → analysis/RMAP.py} +2 -2
  3. py_neuromodulation/analysis/__init__.py +4 -0
  4. py_neuromodulation/{nm_decode.py → analysis/decode.py} +4 -4
  5. py_neuromodulation/{nm_analysis.py → analysis/feature_reader.py} +21 -20
  6. py_neuromodulation/{nm_plots.py → analysis/plots.py} +54 -12
  7. py_neuromodulation/{nm_stats.py → analysis/stats.py} +2 -8
  8. py_neuromodulation/{nm_settings.yaml → default_settings.yaml} +6 -9
  9. py_neuromodulation/features/__init__.py +31 -0
  10. py_neuromodulation/features/bandpower.py +165 -0
  11. py_neuromodulation/{nm_bispectra.py → features/bispectra.py} +8 -5
  12. py_neuromodulation/{nm_bursts.py → features/bursts.py} +14 -9
  13. py_neuromodulation/{nm_coherence.py → features/coherence.py} +17 -13
  14. py_neuromodulation/{nm_features.py → features/feature_processor.py} +30 -53
  15. py_neuromodulation/{nm_fooof.py → features/fooof.py} +11 -8
  16. py_neuromodulation/{nm_hjorth_raw.py → features/hjorth_raw.py} +10 -5
  17. py_neuromodulation/{nm_linelength.py → features/linelength.py} +1 -1
  18. py_neuromodulation/{nm_mne_connectivity.py → features/mne_connectivity.py} +5 -6
  19. py_neuromodulation/{nm_nolds.py → features/nolds.py} +5 -7
  20. py_neuromodulation/{nm_oscillatory.py → features/oscillatory.py} +7 -181
  21. py_neuromodulation/{nm_sharpwaves.py → features/sharpwaves.py} +13 -4
  22. py_neuromodulation/filter/__init__.py +3 -0
  23. py_neuromodulation/{nm_kalmanfilter.py → filter/kalman_filter.py} +67 -71
  24. py_neuromodulation/filter/kalman_filter_external.py +1890 -0
  25. py_neuromodulation/{nm_filter.py → filter/mne_filter.py} +128 -219
  26. py_neuromodulation/filter/notch_filter.py +93 -0
  27. py_neuromodulation/processing/__init__.py +10 -0
  28. py_neuromodulation/{nm_artifacts.py → processing/artifacts.py} +2 -3
  29. py_neuromodulation/{nm_preprocessing.py → processing/data_preprocessor.py} +19 -25
  30. py_neuromodulation/{nm_filter_preprocessing.py → processing/filter_preprocessing.py} +3 -4
  31. py_neuromodulation/{nm_normalization.py → processing/normalization.py} +9 -7
  32. py_neuromodulation/{nm_projection.py → processing/projection.py} +14 -14
  33. py_neuromodulation/{nm_rereference.py → processing/rereference.py} +13 -13
  34. py_neuromodulation/{nm_resample.py → processing/resample.py} +1 -4
  35. py_neuromodulation/stream/__init__.py +3 -0
  36. py_neuromodulation/{nm_run_analysis.py → stream/data_processor.py} +42 -42
  37. py_neuromodulation/stream/generator.py +53 -0
  38. py_neuromodulation/{nm_mnelsl_generator.py → stream/mnelsl_player.py} +10 -6
  39. py_neuromodulation/{nm_mnelsl_stream.py → stream/mnelsl_stream.py} +13 -9
  40. py_neuromodulation/{nm_settings.py → stream/settings.py} +27 -24
  41. py_neuromodulation/{nm_stream.py → stream/stream.py} +217 -188
  42. py_neuromodulation/utils/__init__.py +2 -0
  43. py_neuromodulation/{nm_define_nmchannels.py → utils/channels.py} +14 -9
  44. py_neuromodulation/{nm_database.py → utils/database.py} +2 -2
  45. py_neuromodulation/{nm_IO.py → utils/io.py} +42 -77
  46. py_neuromodulation/utils/keyboard.py +52 -0
  47. py_neuromodulation/{nm_logger.py → utils/logging.py} +3 -3
  48. py_neuromodulation/{nm_types.py → utils/types.py} +72 -14
  49. {py_neuromodulation-0.0.5.dist-info → py_neuromodulation-0.0.6.dist-info}/METADATA +3 -11
  50. py_neuromodulation-0.0.6.dist-info/RECORD +89 -0
  51. py_neuromodulation/FieldTrip.py +0 -589
  52. py_neuromodulation/_write_example_dataset_helper.py +0 -83
  53. py_neuromodulation/nm_generator.py +0 -45
  54. py_neuromodulation/nm_stream_abc.py +0 -166
  55. py_neuromodulation-0.0.5.dist-info/RECORD +0 -83
  56. {py_neuromodulation-0.0.5.dist-info → py_neuromodulation-0.0.6.dist-info}/WHEEL +0 -0
  57. {py_neuromodulation-0.0.5.dist-info → py_neuromodulation-0.0.6.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1890 @@
1
+ """
2
+ This file contains unmodified code from the FilterPy library, specifically the KalmanFilter class.
3
+ Original sources:
4
+ - https://github.com/rlabbe/filterpy/blob/master/filterpy/kalman/kalman_filter.py
5
+ - https://github.com/rlabbe/filterpy/blob/master/filterpy/common/discretization.py
6
+ - https://github.com/rlabbe/filterpy/blob/master/filterpy/stats/stats.py
7
+
8
+ Certain modifications were made to the original code:
9
+ - Updated some code to be compatible with newer Python versions and package versions
10
+ - Made some simplifications to adjust the code for our purposes
11
+ - Refactored some code to be more in line with our codebase style
12
+
13
+ Original software under MIT license:
14
+
15
+ Copyright (c) 2015 Roger R. Labbe Jr
16
+
17
+ Permission is hereby granted, free of charge, to any person obtaining a copy
18
+ of this software and associated documentation files (the "Software"), to deal
19
+ in the Software without restriction, including without limitation the rights
20
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
21
+ copies of the Software, and to permit persons to whom the Software is
22
+ furnished to do so, subject to the following conditions:
23
+
24
+ The above copyright notice and this permission notice shall be included in
25
+ all copies or substantial portions of the Software.
26
+
27
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
30
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
31
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
32
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
33
+ THE SOFTWARE.
34
+
35
+ ------------------------------------------------------------------------------------
36
+
37
+ This module implements the linear Kalman filter in both an object
38
+ oriented and procedural form. The KalmanFilter class implements
39
+ the filter by storing the various matrices in instance variables,
40
+ minimizing the amount of bookkeeping you have to do.
41
+
42
+ All Kalman filters operate with a predict->update cycle. The
43
+ predict step, implemented with the method or function predict(),
44
+ uses the state transition matrix F to predict the state in the next
45
+ time period (epoch). The state is stored as a gaussian (x, P), where
46
+ x is the state (column) vector, and P is its covariance. Covariance
47
+ matrix Q specifies the process covariance. In Bayesian terms, this
48
+ prediction is called the *prior*, which you can think of colloquially
49
+ as the estimate prior to incorporating the measurement.
50
+
51
+ The update step, implemented with the method or function `update()`,
52
+ incorporates the measurement z with covariance R, into the state
53
+ estimate (x, P). The class stores the system uncertainty in S,
54
+ the innovation (residual between prediction and measurement in
55
+ measurement space) in y, and the Kalman gain in k. The procedural
56
+ form returns these variables to you. In Bayesian terms this computes
57
+ the *posterior* - the estimate after the information from the
58
+ measurement is incorporated.
59
+
60
+ Whether you use the OO form or procedural form is up to you. If
61
+ matrices such as H, R, and F are changing each epoch, you'll probably
62
+ opt to use the procedural form. If they are unchanging, the OO
63
+ form is perhaps easier to use since you won't need to keep track
64
+ of these matrices. This is especially useful if you are implementing
65
+ banks of filters or comparing various KF designs for performance;
66
+ a trivial coding bug could lead to using the wrong sets of matrices.
67
+
68
+ This module also offers an implementation of the RTS smoother, and
69
+ other helper functions, such as log likelihood computations.
70
+
71
+ The Saver class allows you to easily save the state of the
72
+ KalmanFilter class after every update
73
+
74
+ This module expects NumPy arrays for all values that expect
75
+ arrays, although in a few cases, particularly method parameters,
76
+ it will accept types that convert to NumPy arrays, such as lists
77
+ of lists. These exceptions are documented in the method or function.
78
+
79
+ Examples
80
+ --------
81
+ The following example constructs a constant velocity kinematic
82
+ filter, filters noisy data, and plots the results. It also demonstrates
83
+ using the Saver class to save the state of the filter at each epoch.
84
+
85
+ .. code-block:: Python
86
+
87
+ import matplotlib.pyplot as plt
88
+ import numpy as np
89
+ from filterpy.kalman import KalmanFilter
90
+ from filterpy.common import Q_discrete_white_noise, Saver
91
+
92
+ r_std, q_std = 2., 0.003
93
+ cv = KalmanFilter(dim_x=2, dim_z=1)
94
+ cv.x = np.array([[0., 1.]]) # position, velocity
95
+ cv.F = np.array([[1, dt], [0, 1]])
96
+ cv.R = np.array([[r_std**2]])
97
+ cv.H = np.array([[1., 0.]])
98
+ cv.P = np.diag([.1**2, .03**2])
99
+ cv.Q = Q_discrete_white_noise(2, dt, q_std**2)
100
+
101
+ saver = Saver(cv)
102
+ for z in range(100):
103
+ cv.predict()
104
+ cv.update([z + randn() * r_std])
105
+ saver.save() # save the filter's state
106
+
107
+ saver.to_array()
108
+ plt.plot(saver.x[:, 0])
109
+
110
+ # plot all of the priors
111
+ plt.plot(saver.x_prior[:, 0])
112
+
113
+ # plot mahalanobis distance
114
+ plt.figure()
115
+ plt.plot(saver.mahalanobis)
116
+
117
+ This code implements the same filter using the procedural form
118
+
119
+ x = np.array([[0., 1.]]) # position, velocity
120
+ F = np.array([[1, dt], [0, 1]])
121
+ R = np.array([[r_std**2]])
122
+ H = np.array([[1., 0.]])
123
+ P = np.diag([.1**2, .03**2])
124
+ Q = Q_discrete_white_noise(2, dt, q_std**2)
125
+
126
+ for z in range(100):
127
+ x, P = predict(x, P, F=F, Q=Q)
128
+ x, P = update(x, P, z=[z + randn() * r_std], R=R, H=H)
129
+ xs.append(x[0, 0])
130
+ plt.plot(xs)
131
+
132
+
133
+ For more examples see the test subdirectory, or refer to the
134
+ book cited below. In it I both teach Kalman filtering from basic
135
+ principles, and teach the use of this library in great detail.
136
+
137
+ FilterPy library.
138
+ http://github.com/rlabbe/filterpy
139
+
140
+ Documentation at:
141
+ https://filterpy.readthedocs.org
142
+
143
+ Supporting book at:
144
+ https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
145
+
146
+ This is licensed under an MIT license. See the readme.MD file
147
+ for more information.
148
+
149
+ Copyright 2014-2018 Roger R Labbe Jr.
150
+ """
151
+
152
+ from copy import deepcopy
153
+ import sys
154
+ import numpy as np
155
+ from scipy.stats import multivariate_normal
156
+
157
+
158
+ class KalmanFilter(object):
159
+ r"""Implements a Kalman filter. You are responsible for setting the
160
+ various state variables to reasonable values; the defaults will
161
+ not give you a functional filter.
162
+
163
+ For now the best documentation is my free book Kalman and Bayesian
164
+ Filters in Python [2]_. The test files in this directory also give you a
165
+ basic idea of use, albeit without much description.
166
+
167
+ In brief, you will first construct this object, specifying the size of
168
+ the state vector with dim_x and the size of the measurement vector that
169
+ you will be using with dim_z. These are mostly used to perform size checks
170
+ when you assign values to the various matrices. For example, if you
171
+ specified dim_z=2 and then try to assign a 3x3 matrix to R (the
172
+ measurement noise matrix) you will get an assert exception because R
173
+ should be 2x2. (If for whatever reason you need to alter the size of
174
+ things midstream just use the underscore version of the matrices to
175
+ assign directly: your_filter._R = a_3x3_matrix.)
176
+
177
+ After construction the filter will have default matrices created for you,
178
+ but you must specify the values for each. It’s usually easiest to just
179
+ overwrite them rather than assign to each element yourself. This will be
180
+ clearer in the example below. All are of type numpy.array.
181
+
182
+
183
+ Examples
184
+ --------
185
+
186
+ Here is a filter that tracks position and velocity using a sensor that only
187
+ reads position.
188
+
189
+ First construct the object with the required dimensionality.
190
+
191
+ .. code::
192
+
193
+ from filterpy.kalman import KalmanFilter
194
+ f = KalmanFilter (dim_x=2, dim_z=1)
195
+
196
+
197
+ Assign the initial value for the state (position and velocity). You can do this
198
+ with a two dimensional array like so:
199
+
200
+ .. code::
201
+
202
+ f.x = np.array([[2.], # position
203
+ [0.]]) # velocity
204
+
205
+ or just use a one dimensional array, which I prefer doing.
206
+
207
+ .. code::
208
+
209
+ f.x = np.array([2., 0.])
210
+
211
+
212
+ Define the state transition matrix:
213
+
214
+ .. code::
215
+
216
+ f.F = np.array([[1.,1.],
217
+ [0.,1.]])
218
+
219
+ Define the measurement function:
220
+
221
+ .. code::
222
+
223
+ f.H = np.array([[1.,0.]])
224
+
225
+ Define the covariance matrix. Here I take advantage of the fact that
226
+ P already contains np.eye(dim_x), and just multiply by the uncertainty:
227
+
228
+ .. code::
229
+
230
+ f.P *= 1000.
231
+
232
+ I could have written:
233
+
234
+ .. code::
235
+
236
+ f.P = np.array([[1000., 0.],
237
+ [ 0., 1000.] ])
238
+
239
+ You decide which is more readable and understandable.
240
+
241
+ Now assign the measurement noise. Here the dimension is 1x1, so I can
242
+ use a scalar
243
+
244
+ .. code::
245
+
246
+ f.R = 5
247
+
248
+ I could have done this instead:
249
+
250
+ .. code::
251
+
252
+ f.R = np.array([[5.]])
253
+
254
+ Note that this must be a 2 dimensional array, as must all the matrices.
255
+
256
+ Finally, I will assign the process noise. Here I will take advantage of
257
+ another FilterPy library function:
258
+
259
+ .. code::
260
+
261
+ from filterpy.common import Q_discrete_white_noise
262
+ f.Q = Q_discrete_white_noise(dim=2, dt=0.1, var=0.13)
263
+
264
+
265
+ Now just perform the standard predict/update loop:
266
+
267
+ while some_condition_is_true:
268
+
269
+ .. code::
270
+
271
+ z = get_sensor_reading()
272
+ f.predict()
273
+ f.update(z)
274
+
275
+ do_something_with_estimate (f.x)
276
+
277
+
278
+ **Procedural Form**
279
+
280
+ This module also contains stand alone functions to perform Kalman filtering.
281
+ Use these if you are not a fan of objects.
282
+
283
+ **Example**
284
+
285
+ .. code::
286
+
287
+ while True:
288
+ z, R = read_sensor()
289
+ x, P = predict(x, P, F, Q)
290
+ x, P = update(x, P, z, R, H)
291
+
292
+ See my book Kalman and Bayesian Filters in Python [2]_.
293
+
294
+
295
+ You will have to set the following attributes after constructing this
296
+ object for the filter to perform properly. Please note that there are
297
+ various checks in place to ensure that you have made everything the
298
+ 'correct' size. However, it is possible to provide incorrectly sized
299
+ arrays such that the linear algebra can not perform an operation.
300
+ It can also fail silently - you can end up with matrices of a size that
301
+ allows the linear algebra to work, but are the wrong shape for the problem
302
+ you are trying to solve.
303
+
304
+ Parameters
305
+ ----------
306
+ dim_x : int
307
+ Number of state variables for the Kalman filter. For example, if
308
+ you are tracking the position and velocity of an object in two
309
+ dimensions, dim_x would be 4.
310
+ This is used to set the default size of P, Q, and u
311
+
312
+ dim_z : int
313
+ Number of measurement inputs. For example, if the sensor
314
+ provides you with position in (x,y), dim_z would be 2.
315
+
316
+ dim_u : int (optional)
317
+ size of the control input, if it is being used.
318
+ Default value of 0 indicates it is not used.
319
+
320
+ compute_log_likelihood : bool (default = True)
321
+ Computes log likelihood by default, but this can be a slow
322
+ computation, so if you never use it you can turn this computation
323
+ off.
324
+
325
+ Attributes
326
+ ----------
327
+ x : numpy.array(dim_x, 1)
328
+ Current state estimate. Any call to update() or predict() updates
329
+ this variable.
330
+
331
+ P : numpy.array(dim_x, dim_x)
332
+ Current state covariance matrix. Any call to update() or predict()
333
+ updates this variable.
334
+
335
+ x_prior : numpy.array(dim_x, 1)
336
+ Prior (predicted) state estimate. The *_prior and *_post attributes
337
+ are for convienence; they store the prior and posterior of the
338
+ current epoch. Read Only.
339
+
340
+ P_prior : numpy.array(dim_x, dim_x)
341
+ Prior (predicted) state covariance matrix. Read Only.
342
+
343
+ x_post : numpy.array(dim_x, 1)
344
+ Posterior (updated) state estimate. Read Only.
345
+
346
+ P_post : numpy.array(dim_x, dim_x)
347
+ Posterior (updated) state covariance matrix. Read Only.
348
+
349
+ z : numpy.array
350
+ Last measurement used in update(). Read only.
351
+
352
+ R : numpy.array(dim_z, dim_z)
353
+ Measurement noise matrix
354
+
355
+ Q : numpy.array(dim_x, dim_x)
356
+ Process noise matrix
357
+
358
+ F : numpy.array()
359
+ State Transition matrix
360
+
361
+ H : numpy.array(dim_z, dim_x)
362
+ Measurement function
363
+
364
+ y : numpy.array
365
+ Residual of the update step. Read only.
366
+
367
+ K : numpy.array(dim_x, dim_z)
368
+ Kalman gain of the update step. Read only.
369
+
370
+ S : numpy.array
371
+ System uncertainty (P projected to measurement space). Read only.
372
+
373
+ SI : numpy.array
374
+ Inverse system uncertainty. Read only.
375
+
376
+ log_likelihood : float
377
+ log-likelihood of the last measurement. Read only.
378
+
379
+ likelihood : float
380
+ likelihood of last measurement. Read only.
381
+
382
+ Computed from the log-likelihood. The log-likelihood can be very
383
+ small, meaning a large negative value such as -28000. Taking the
384
+ exp() of that results in 0.0, which can break typical algorithms
385
+ which multiply by this value, so by default we always return a
386
+ number >= sys.float_info.min.
387
+
388
+ mahalanobis : float
389
+ mahalanobis distance of the innovation. Read only.
390
+
391
+ inv : function, default numpy.linalg.inv
392
+ If you prefer another inverse function, such as the Moore-Penrose
393
+ pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv
394
+
395
+ This is only used to invert self.S. If you know it is diagonal, you
396
+ might choose to set it to filterpy.common.inv_diagonal, which is
397
+ several times faster than numpy.linalg.inv for diagonal matrices.
398
+
399
+ alpha : float
400
+ Fading memory setting. 1.0 gives the normal Kalman filter, and
401
+ values slightly larger than 1.0 (such as 1.02) give a fading
402
+ memory effect - previous measurements have less influence on the
403
+ filter's estimates. This formulation of the Fading memory filter
404
+ (there are many) is due to Dan Simon [1]_.
405
+
406
+ References
407
+ ----------
408
+
409
+ .. [1] Dan Simon. "Optimal State Estimation." John Wiley & Sons.
410
+ p. 208-212. (2006)
411
+
412
+ .. [2] Roger Labbe. "Kalman and Bayesian Filters in Python"
413
+ https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
414
+
415
+ """
416
+
417
+ def __init__(self, dim_x, dim_z, dim_u=0):
418
+ if dim_x < 1:
419
+ raise ValueError("dim_x must be 1 or greater")
420
+ if dim_z < 1:
421
+ raise ValueError("dim_z must be 1 or greater")
422
+ if dim_u < 0:
423
+ raise ValueError("dim_u must be 0 or greater")
424
+
425
+ self.dim_x = dim_x
426
+ self.dim_z = dim_z
427
+ self.dim_u = dim_u
428
+
429
+ self.x = np.zeros((dim_x, 1)) # state
430
+ self.P = np.eye(dim_x) # uncertainty covariance
431
+ self.Q = np.eye(dim_x) # process uncertainty
432
+ self.B = None # control transition matrix
433
+ self.F = np.eye(dim_x) # state transition matrix
434
+ self.H = np.zeros((dim_z, dim_x)) # Measurement function
435
+ self.R = np.eye(dim_z) # state uncertainty
436
+ self._alpha_sq = 1.0 # fading memory control
437
+ self.M = np.zeros((dim_z, dim_z)) # process-measurement cross correlation
438
+ self.z = np.array([[None] * self.dim_z]).T
439
+
440
+ # gain and residual are computed during the innovation step. We
441
+ # save them so that in case you want to inspect them for various
442
+ # purposes
443
+ self.K = np.zeros((dim_x, dim_z)) # kalman gain
444
+ self.y = np.zeros((dim_z, 1))
445
+ self.S = np.zeros((dim_z, dim_z)) # system uncertainty
446
+ self.SI = np.zeros((dim_z, dim_z)) # inverse system uncertainty
447
+
448
+ # identity matrix. Do not alter this.
449
+ self._I = np.eye(dim_x)
450
+
451
+ # these will always be a copy of x,P after predict() is called
452
+ self.x_prior = self.x.copy()
453
+ self.P_prior = self.P.copy()
454
+
455
+ # these will always be a copy of x,P after update() is called
456
+ self.x_post = self.x.copy()
457
+ self.P_post = self.P.copy()
458
+
459
+ # Only computed if requested via property
460
+ self._log_likelihood = np.log(sys.float_info.min)
461
+ self._likelihood = sys.float_info.min
462
+ self._mahalanobis = None
463
+
464
+ self.inv = np.linalg.inv
465
+
466
+ def predict(self, u=None, B=None, F=None, Q=None):
467
+ """
468
+ Predict next state (prior) using the Kalman filter state propagation
469
+ equations.
470
+
471
+ Parameters
472
+ ----------
473
+
474
+ u : np.array
475
+ Optional control vector. If not `None`, it is multiplied by B
476
+ to create the control input into the system.
477
+
478
+ B : np.array(dim_x, dim_z), or None
479
+ Optional control transition matrix; a value of None
480
+ will cause the filter to use `self.B`.
481
+
482
+ F : np.array(dim_x, dim_x), or None
483
+ Optional state transition matrix; a value of None
484
+ will cause the filter to use `self.F`.
485
+
486
+ Q : np.array(dim_x, dim_x), scalar, or None
487
+ Optional process noise matrix; a value of None will cause the
488
+ filter to use `self.Q`.
489
+ """
490
+
491
+ if B is None:
492
+ B = self.B
493
+ if F is None:
494
+ F = self.F
495
+ if Q is None:
496
+ Q = self.Q
497
+ elif np.isscalar(Q):
498
+ Q = np.eye(self.dim_x) * Q
499
+
500
+ # x = Fx + Bu
501
+ if B is not None and u is not None:
502
+ self.x = np.dot(F, self.x) + np.dot(B, u)
503
+ else:
504
+ self.x = np.dot(F, self.x)
505
+
506
+ # P = FPF' + Q
507
+ self.P = self._alpha_sq * np.dot(np.dot(F, self.P), F.T) + Q
508
+
509
+ # save prior
510
+ self.x_prior = self.x.copy()
511
+ self.P_prior = self.P.copy()
512
+
513
+ def update(self, z, R=None, H=None):
514
+ """
515
+ Add a new measurement (z) to the Kalman filter.
516
+
517
+ If z is None, nothing is computed. However, x_post and P_post are
518
+ updated with the prior (x_prior, P_prior), and self.z is set to None.
519
+
520
+ Parameters
521
+ ----------
522
+ z : (dim_z, 1): array_like
523
+ measurement for this update. z can be a scalar if dim_z is 1,
524
+ otherwise it must be convertible to a column vector.
525
+
526
+ R : np.array, scalar, or None
527
+ Optionally provide R to override the measurement noise for this
528
+ one call, otherwise self.R will be used.
529
+
530
+ H : np.array, or None
531
+ Optionally provide H to override the measurement function for this
532
+ one call, otherwise self.H will be used.
533
+ """
534
+
535
+ # set to None to force recompute
536
+ self._log_likelihood = None
537
+ self._likelihood = None
538
+ self._mahalanobis = None
539
+
540
+ if z is None:
541
+ self.z = np.array([[None] * self.dim_z]).T
542
+ self.x_post = self.x.copy()
543
+ self.P_post = self.P.copy()
544
+ self.y = np.zeros((self.dim_z, 1))
545
+ return
546
+
547
+ z = reshape_z(z, self.dim_z, self.x.ndim)
548
+
549
+ if R is None:
550
+ R = self.R
551
+ elif np.isscalar(R):
552
+ R = np.eye(self.dim_z) * R
553
+
554
+ if H is None:
555
+ H = self.H
556
+
557
+ # y = z - Hx
558
+ # error (residual) between measurement and prediction
559
+ self.y = z - np.dot(H, self.x)
560
+
561
+ # common subexpression for speed
562
+ PHT = np.dot(self.P, H.T)
563
+
564
+ # S = HPH' + R
565
+ # project system uncertainty into measurement space
566
+ self.S = np.dot(H, PHT) + R
567
+ self.SI = self.inv(self.S)
568
+ # K = PH'inv(S)
569
+ # map system uncertainty into kalman gain
570
+ self.K = np.dot(PHT, self.SI)
571
+
572
+ # x = x + Ky
573
+ # predict new x with residual scaled by the kalman gain
574
+ self.x = self.x + np.dot(self.K, self.y)
575
+
576
+ # P = (I-KH)P(I-KH)' + KRK'
577
+ # This is more numerically stable
578
+ # and works for non-optimal K vs the equation
579
+ # P = (I-KH)P usually seen in the literature.
580
+
581
+ I_KH = self._I - np.dot(self.K, H)
582
+ self.P = np.dot(np.dot(I_KH, self.P), I_KH.T) + np.dot(
583
+ np.dot(self.K, R), self.K.T
584
+ )
585
+
586
+ # save measurement and posterior state
587
+ self.z = deepcopy(z)
588
+ self.x_post = self.x.copy()
589
+ self.P_post = self.P.copy()
590
+
591
+ def predict_steadystate(self, u=0, B=None):
592
+ """
593
+ Predict state (prior) using the Kalman filter state propagation
594
+ equations. Only x is updated, P is left unchanged. See
595
+ update_steadystate() for a longer explanation of when to use this
596
+ method.
597
+
598
+ Parameters
599
+ ----------
600
+
601
+ u : np.array
602
+ Optional control vector. If non-zero, it is multiplied by B
603
+ to create the control input into the system.
604
+
605
+ B : np.array(dim_x, dim_z), or None
606
+ Optional control transition matrix; a value of None
607
+ will cause the filter to use `self.B`.
608
+ """
609
+
610
+ if B is None:
611
+ B = self.B
612
+
613
+ # x = Fx + Bu
614
+ if B is not None:
615
+ self.x = np.dot(self.F, self.x) + np.dot(B, u)
616
+ else:
617
+ self.x = np.dot(self.F, self.x)
618
+
619
+ # save prior
620
+ self.x_prior = self.x.copy()
621
+ self.P_prior = self.P.copy()
622
+
623
+ def update_steadystate(self, z):
624
+ """
625
+ Add a new measurement (z) to the Kalman filter without recomputing
626
+ the Kalman gain K, the state covariance P, or the system
627
+ uncertainty S.
628
+
629
+ You can use this for LTI systems since the Kalman gain and covariance
630
+ converge to a fixed value. Precompute these and assign them explicitly,
631
+ or run the Kalman filter using the normal predict()/update() cycle
632
+ until they converge.
633
+
634
+ The main advantage of this call is speed. We do significantly less
635
+ computation, notably avoiding a costly matrix inversion.
636
+
637
+ Use in conjunction with predict_steadystate(), otherwise P will grow
638
+ without bound.
639
+
640
+ Parameters
641
+ ----------
642
+ z : (dim_z, 1): array_like
643
+ measurement for this update. z can be a scalar if dim_z is 1,
644
+ otherwise it must be convertible to a column vector.
645
+
646
+
647
+ Examples
648
+ --------
649
+ >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter
650
+ >>> # let filter converge on representative data, then save k and P
651
+ >>> for i in range(100):
652
+ >>> cv.predict()
653
+ >>> cv.update([i, i, i])
654
+ >>> saved_k = np.copy(cv.K)
655
+ >>> saved_P = np.copy(cv.P)
656
+
657
+ later on:
658
+
659
+ >>> cv = kinematic_kf(dim=3, order=2) # 3D const velocity filter
660
+ >>> cv.K = np.copy(saved_K)
661
+ >>> cv.P = np.copy(saved_P)
662
+ >>> for i in range(100):
663
+ >>> cv.predict_steadystate()
664
+ >>> cv.update_steadystate([i, i, i])
665
+ """
666
+
667
+ # set to None to force recompute
668
+ self._log_likelihood = None
669
+ self._likelihood = None
670
+ self._mahalanobis = None
671
+
672
+ if z is None:
673
+ self.z = np.array([[None] * self.dim_z]).T
674
+ self.x_post = self.x.copy()
675
+ self.P_post = self.P.copy()
676
+ self.y = np.zeros((self.dim_z, 1))
677
+ return
678
+
679
+ z = reshape_z(z, self.dim_z, self.x.ndim)
680
+
681
+ # y = z - Hx
682
+ # error (residual) between measurement and prediction
683
+ self.y = z - np.dot(self.H, self.x)
684
+
685
+ # x = x + Ky
686
+ # predict new x with residual scaled by the kalman gain
687
+ self.x = self.x + np.dot(self.K, self.y)
688
+
689
+ self.z = deepcopy(z)
690
+ self.x_post = self.x.copy()
691
+ self.P_post = self.P.copy()
692
+
693
+ # set to None to force recompute
694
+ self._log_likelihood = None
695
+ self._likelihood = None
696
+ self._mahalanobis = None
697
+
698
+ def update_correlated(self, z, R=None, H=None):
699
+ """Add a new measurement (z) to the Kalman filter assuming that
700
+ process noise and measurement noise are correlated as defined in
701
+ the `self.M` matrix.
702
+
703
+ If z is None, nothing is changed.
704
+
705
+ Parameters
706
+ ----------
707
+ z : (dim_z, 1): array_like
708
+ measurement for this update. z can be a scalar if dim_z is 1,
709
+ otherwise it must be convertible to a column vector.
710
+
711
+ R : np.array, scalar, or None
712
+ Optionally provide R to override the measurement noise for this
713
+ one call, otherwise self.R will be used.
714
+
715
+ H : np.array, or None
716
+ Optionally provide H to override the measurement function for this
717
+ one call, otherwise self.H will be used.
718
+ """
719
+
720
+ # set to None to force recompute
721
+ self._log_likelihood = None
722
+ self._likelihood = None
723
+ self._mahalanobis = None
724
+
725
+ if z is None:
726
+ self.z = np.array([[None] * self.dim_z]).T
727
+ self.x_post = self.x.copy()
728
+ self.P_post = self.P.copy()
729
+ self.y = np.zeros((self.dim_z, 1))
730
+ return
731
+
732
+ z = reshape_z(z, self.dim_z, self.x.ndim)
733
+
734
+ if R is None:
735
+ R = self.R
736
+ elif np.isscalar(R):
737
+ R = np.eye(self.dim_z) * R
738
+
739
+ # rename for readability and a tiny extra bit of speed
740
+ if H is None:
741
+ H = self.H
742
+
743
+ # handle special case: if z is in form [[z]] but x is not a column
744
+ # vector dimensions will not match
745
+ if self.x.ndim == 1 and np.shape(z) == (1, 1):
746
+ z = z[0]
747
+
748
+ if np.shape(z) == (): # is it scalar, e.g. z=3 or z=np.array(3)
749
+ z = np.asarray([z])
750
+
751
+ # y = z - Hx
752
+ # error (residual) between measurement and prediction
753
+ self.y = z - np.dot(H, self.x)
754
+
755
+ # common subexpression for speed
756
+ PHT = np.dot(self.P, H.T)
757
+
758
+ # project system uncertainty into measurement space
759
+ self.S = np.dot(H, PHT) + np.dot(H, self.M) + np.dot(self.M.T, H.T) + R
760
+ self.SI = self.inv(self.S)
761
+
762
+ # K = PH'inv(S)
763
+ # map system uncertainty into kalman gain
764
+ self.K = np.dot(PHT + self.M, self.SI)
765
+
766
+ # x = x + Ky
767
+ # predict new x with residual scaled by the kalman gain
768
+ self.x = self.x + np.dot(self.K, self.y)
769
+ self.P = self.P - np.dot(self.K, np.dot(H, self.P) + self.M.T)
770
+
771
+ self.z = deepcopy(z)
772
+ self.x_post = self.x.copy()
773
+ self.P_post = self.P.copy()
774
+
775
    def batch_filter(
        self,
        zs,
        Fs=None,
        Qs=None,
        Hs=None,
        Rs=None,
        Bs=None,
        us=None,
        update_first=False,
        saver=None,
    ):
        """Batch process a sequence of measurements.

        Parameters
        ----------
        zs : list-like
            list of measurements at each time step `self.dt`. Missing
            measurements must be represented by `None`.
        Fs : None or list-like, default=None
            state transition matrix per epoch; if None, self.F is used for
            all epochs.
        Qs : None, np.array or list-like, default=None
            process error covariance per epoch; if None, self.Q is used for
            all epochs.
        Hs : None, np.array or list-like, default=None
            measurement matrix per epoch; if None, self.H is used for all
            epochs.
        Rs : None, np.array or list-like, default=None
            measurement error covariance per epoch; if None, self.R is used
            for all epochs.
        Bs : None, np.array or list-like, default=None
            control transition matrix per epoch; if None, self.B is used for
            all epochs.
        us : None, np.array or list-like, default=None
            control input vector per epoch; if None, 0 (no control input) is
            used for all epochs.
        update_first : bool, optional, default=False
            if True the order of operations is update followed by predict;
            default is predict->update.
        saver : filterpy.common.Saver, optional
            if provided, saver.save() is called after every epoch.

        Returns
        -------
        means : np.array((n,dim_x,1))
            state for each time step after the update.
        covariance : np.array((n,dim_x,dim_x))
            covariance for each time step after the update.
        means_predictions : np.array((n,dim_x,1))
            state for each time step after the prediction.
        covariance_predictions : np.array((n,dim_x,dim_x))
            covariance for each time step after the prediction.

        Examples
        --------
        .. code-block:: Python

            zs = [t + random.randn()*4 for t in range (40)]
            Fs = [np.array([[1., dt], [0, 1]] for dt in dts]

            (mu, cov, _, _) = kf.batch_filter(zs, Fs=Fs)
            (xs, Ps, Ks) = kf.rts_smoother(mu, cov, Fs=Fs)
        """

        # pylint: disable=too-many-statements
        n = np.size(zs, 0)
        # any per-epoch parameter not supplied falls back to the filter's
        # current matrix, replicated for every epoch
        if Fs is None:
            Fs = [self.F] * n
        if Qs is None:
            Qs = [self.Q] * n
        if Hs is None:
            Hs = [self.H] * n
        if Rs is None:
            Rs = [self.R] * n
        if Bs is None:
            Bs = [self.B] * n
        if us is None:
            us = [0] * n

        # mean estimates from Kalman Filter; output dimensionality mirrors
        # whether x is stored as a 1D array or a column vector
        if self.x.ndim == 1:
            means = np.zeros((n, self.dim_x))
            means_p = np.zeros((n, self.dim_x))
        else:
            means = np.zeros((n, self.dim_x, 1))
            means_p = np.zeros((n, self.dim_x, 1))

        # state covariances from Kalman Filter
        covariances = np.zeros((n, self.dim_x, self.dim_x))
        covariances_p = np.zeros((n, self.dim_x, self.dim_x))

        if update_first:
            for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):
                self.update(z, R=R, H=H)
                means[i, :] = self.x
                covariances[i, :, :] = self.P

                self.predict(u=u, B=B, F=F, Q=Q)
                means_p[i, :] = self.x
                covariances_p[i, :, :] = self.P

                if saver is not None:
                    saver.save()
        else:
            for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):
                self.predict(u=u, B=B, F=F, Q=Q)
                means_p[i, :] = self.x
                covariances_p[i, :, :] = self.P

                self.update(z, R=R, H=H)
                means[i, :] = self.x
                covariances[i, :, :] = self.P

                if saver is not None:
                    saver.save()

        return (means, covariances, means_p, covariances_p)
950
+
951
    def rts_smoother(self, Xs, Ps, Fs=None, Qs=None, inv=np.linalg.inv):
        """
        Runs the Rauch-Tung-Striebal Kalman smoother on a set of
        means and covariances computed by a Kalman filter. The usual input
        would come from the output of `KalmanFilter.batch_filter()`.

        Parameters
        ----------
        Xs : numpy.array
            array of the means (state variable x) of the output of a Kalman
            filter.
        Ps : numpy.array
            array of the covariances of the output of a kalman filter.
        Fs : list-like collection of numpy.array, optional
            State transition matrix of the Kalman filter at each time step.
            Optional, if not provided the filter's self.F will be used.
        Qs : list-like collection of numpy.array, optional
            Process noise of the Kalman filter at each time step. Optional,
            if not provided the filter's self.Q will be used.
        inv : function, default numpy.linalg.inv
            If you prefer another inverse function, such as the Moore-Penrose
            pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv

        Returns
        -------
        x : numpy.ndarray
            smoothed means
        P : numpy.ndarray
            smoothed state covariances
        K : numpy.ndarray
            smoother gain at each step
        Pp : numpy.ndarray
            Predicted state covariances

        Examples
        --------
        .. code-block:: Python

            zs = [t + random.randn()*4 for t in range (40)]

            (mu, cov, _, _) = kalman.batch_filter(zs)
            (x, P, K, Pp) = rts_smoother(mu, cov, kf.F, kf.Q)
        """

        if len(Xs) != len(Ps):
            raise ValueError("length of Xs and Ps must be the same")

        n = Xs.shape[0]
        dim_x = Xs.shape[1]

        if Fs is None:
            Fs = [self.F] * n
        if Qs is None:
            Qs = [self.Q] * n

        # smoother gain
        K = np.zeros((n, dim_x, dim_x))

        # backward pass: start from the filtered results and fold the
        # already-smoothed estimate at k+1 into step k
        x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy()
        for k in range(n - 2, -1, -1):
            # covariance predicted forward from step k using F/Q of step k+1
            Pp[k] = np.dot(np.dot(Fs[k + 1], P[k]), Fs[k + 1].T) + Qs[k + 1]

            # pylint: disable=bad-whitespace
            K[k] = np.dot(np.dot(P[k], Fs[k + 1].T), inv(Pp[k]))
            x[k] += np.dot(K[k], x[k + 1] - np.dot(Fs[k + 1], x[k]))
            P[k] += np.dot(np.dot(K[k], P[k + 1] - Pp[k]), K[k].T)

        return (x, P, K, Pp)
1031
+
1032
+ def get_prediction(self, u=0):
1033
+ """
1034
+ Predicts the next state of the filter and returns it without
1035
+ altering the state of the filter.
1036
+
1037
+ Parameters
1038
+ ----------
1039
+
1040
+ u : np.array
1041
+ optional control input
1042
+
1043
+ Returns
1044
+ -------
1045
+
1046
+ (x, P) : tuple
1047
+ State vector and covariance array of the prediction.
1048
+ """
1049
+
1050
+ x = np.dot(self.F, self.x) + np.dot(self.B, u)
1051
+ P = self._alpha_sq * np.dot(np.dot(self.F, self.P), self.F.T) + self.Q
1052
+ return (x, P)
1053
+
1054
    def get_update(self, z=None):
        """
        Computes the new estimate based on measurement `z` and returns it
        without altering the state of the filter.

        Parameters
        ----------
        z : (dim_z, 1): array_like
            measurement for this update. z can be a scalar if dim_z is 1,
            otherwise it must be convertible to a column vector.

        Returns
        -------
        (x, P) : tuple
            State vector and covariance array of the update.
        """

        # no measurement: the current estimate is already the answer
        if z is None:
            return self.x, self.P
        z = reshape_z(z, self.dim_z, self.x.ndim)

        # work on local copies so the filter state is not modified
        R = self.R
        H = self.H
        P = self.P
        x = self.x

        # error (residual) between measurement and prediction
        y = z - np.dot(H, x)

        # common subexpression for speed
        PHT = np.dot(P, H.T)

        # project system uncertainty into measurement space
        S = np.dot(H, PHT) + R

        # map system uncertainty into kalman gain
        K = np.dot(PHT, self.inv(S))

        # predict new x with residual scaled by the kalman gain
        x = x + np.dot(K, y)

        # P = (I-KH)P(I-KH)' + KRK'  (Joseph form: numerically symmetric/PSD)
        I_KH = self._I - np.dot(K, H)
        P = np.dot(np.dot(I_KH, P), I_KH.T) + np.dot(np.dot(K, R), K.T)

        return x, P
1102
+
1103
+ def residual_of(self, z):
1104
+ """
1105
+ Returns the residual for the given measurement (z). Does not alter
1106
+ the state of the filter.
1107
+ """
1108
+ return z - np.dot(self.H, self.x_prior)
1109
+
1110
+ def measurement_of_state(self, x):
1111
+ """
1112
+ Helper function that converts a state into a measurement.
1113
+
1114
+ Parameters
1115
+ ----------
1116
+
1117
+ x : np.array
1118
+ kalman state vector
1119
+
1120
+ Returns
1121
+ -------
1122
+
1123
+ z : (dim_z, 1): array_like
1124
+ measurement for this update. z can be a scalar if dim_z is 1,
1125
+ otherwise it must be convertible to a column vector.
1126
+ """
1127
+
1128
+ return np.dot(self.H, x)
1129
+
1130
+ @property
1131
+ def log_likelihood(self):
1132
+ """
1133
+ log-likelihood of the last measurement.
1134
+ """
1135
+ if self._log_likelihood is None:
1136
+ self._log_likelihood = logpdf(x=self.y, cov=self.S)
1137
+ return self._log_likelihood
1138
+
1139
+ @property
1140
+ def likelihood(self):
1141
+ """
1142
+ Computed from the log-likelihood. The log-likelihood can be very
1143
+ small, meaning a large negative value such as -28000. Taking the
1144
+ exp() of that results in 0.0, which can break typical algorithms
1145
+ which multiply by this value, so by default we always return a
1146
+ number >= sys.float_info.min.
1147
+ """
1148
+ if self._likelihood is None:
1149
+ self._likelihood = np.exp(self.log_likelihood)
1150
+ if self._likelihood == 0:
1151
+ self._likelihood = sys.float_info.min
1152
+ return self._likelihood
1153
+
1154
+ @property
1155
+ def mahalanobis(self):
1156
+ """ "
1157
+ Mahalanobis distance of measurement. E.g. 3 means measurement
1158
+ was 3 standard deviations away from the predicted value.
1159
+
1160
+ Returns
1161
+ -------
1162
+ mahalanobis : float
1163
+ """
1164
+ if self._mahalanobis is None:
1165
+ self._mahalanobis = np.sqrt(
1166
+ float(np.dot(np.dot(self.y.T, self.SI), self.y))
1167
+ )
1168
+ return self._mahalanobis
1169
+
1170
    @property
    def alpha(self):
        """
        Fading memory setting. 1.0 gives the normal Kalman filter, and
        values slightly larger than 1.0 (such as 1.02) give a fading
        memory effect - previous measurements have less influence on the
        filter's estimates. This formulation of the Fading memory filter
        (there are many) is due to Dan Simon [1]_.
        """
        # stored internally as alpha squared; recover alpha
        return self._alpha_sq**0.5
1180
+
1181
+ def log_likelihood_of(self, z):
1182
+ """
1183
+ log likelihood of the measurement `z`. This should only be called
1184
+ after a call to update(). Calling after predict() will yield an
1185
+ incorrect result."""
1186
+
1187
+ if z is None:
1188
+ return np.log(sys.float_info.min)
1189
+ return logpdf(z, np.dot(self.H, self.x), self.S)
1190
+
1191
+ @alpha.setter
1192
+ def alpha(self, value):
1193
+ if not np.isscalar(value) or value < 1:
1194
+ raise ValueError("alpha must be a float greater than 1")
1195
+
1196
+ self._alpha_sq = value**2
1197
+
1198
+ def __repr__(self):
1199
+ return "\n".join(
1200
+ [
1201
+ "KalmanFilter object",
1202
+ pretty_str("dim_x", self.dim_x),
1203
+ pretty_str("dim_z", self.dim_z),
1204
+ pretty_str("dim_u", self.dim_u),
1205
+ pretty_str("x", self.x),
1206
+ pretty_str("P", self.P),
1207
+ pretty_str("x_prior", self.x_prior),
1208
+ pretty_str("P_prior", self.P_prior),
1209
+ pretty_str("x_post", self.x_post),
1210
+ pretty_str("P_post", self.P_post),
1211
+ pretty_str("F", self.F),
1212
+ pretty_str("Q", self.Q),
1213
+ pretty_str("R", self.R),
1214
+ pretty_str("H", self.H),
1215
+ pretty_str("K", self.K),
1216
+ pretty_str("y", self.y),
1217
+ pretty_str("S", self.S),
1218
+ pretty_str("SI", self.SI),
1219
+ pretty_str("M", self.M),
1220
+ pretty_str("B", self.B),
1221
+ pretty_str("z", self.z),
1222
+ pretty_str("log-likelihood", self.log_likelihood),
1223
+ pretty_str("likelihood", self.likelihood),
1224
+ pretty_str("mahalanobis", self.mahalanobis),
1225
+ pretty_str("alpha", self.alpha),
1226
+ pretty_str("inv", self.inv),
1227
+ ]
1228
+ )
1229
+
1230
+ def test_matrix_dimensions(self, z=None, H=None, R=None, F=None, Q=None):
1231
+ """
1232
+ Performs a series of asserts to check that the size of everything
1233
+ is what it should be. This can help you debug problems in your design.
1234
+
1235
+ If you pass in H, R, F, Q those will be used instead of this object's
1236
+ value for those matrices.
1237
+
1238
+ Testing `z` (the measurement) is problamatic. x is a vector, and can be
1239
+ implemented as either a 1D array or as a nx1 column vector. Thus Hx
1240
+ can be of different shapes. Then, if Hx is a single value, it can
1241
+ be either a 1D array or 2D vector. If either is true, z can reasonably
1242
+ be a scalar (either '3' or np.array('3') are scalars under this
1243
+ definition), a 1D, 1 element array, or a 2D, 1 element array. You are
1244
+ allowed to pass in any combination that works.
1245
+ """
1246
+
1247
+ if H is None:
1248
+ H = self.H
1249
+ if R is None:
1250
+ R = self.R
1251
+ if F is None:
1252
+ F = self.F
1253
+ if Q is None:
1254
+ Q = self.Q
1255
+ x = self.x
1256
+ P = self.P
1257
+
1258
+ assert (
1259
+ x.ndim == 1 or x.ndim == 2
1260
+ ), "x must have one or two dimensions, but has {}".format(x.ndim)
1261
+
1262
+ if x.ndim == 1:
1263
+ assert (
1264
+ x.shape[0] == self.dim_x
1265
+ ), "Shape of x must be ({},{}), but is {}".format(self.dim_x, 1, x.shape)
1266
+ else:
1267
+ assert x.shape == (
1268
+ self.dim_x,
1269
+ 1,
1270
+ ), "Shape of x must be ({},{}), but is {}".format(self.dim_x, 1, x.shape)
1271
+
1272
+ assert P.shape == (
1273
+ self.dim_x,
1274
+ self.dim_x,
1275
+ ), "Shape of P must be ({},{}), but is {}".format(
1276
+ self.dim_x, self.dim_x, P.shape
1277
+ )
1278
+
1279
+ assert Q.shape == (
1280
+ self.dim_x,
1281
+ self.dim_x,
1282
+ ), "Shape of P must be ({},{}), but is {}".format(
1283
+ self.dim_x, self.dim_x, P.shape
1284
+ )
1285
+
1286
+ assert F.shape == (
1287
+ self.dim_x,
1288
+ self.dim_x,
1289
+ ), "Shape of F must be ({},{}), but is {}".format(
1290
+ self.dim_x, self.dim_x, F.shape
1291
+ )
1292
+
1293
+ assert np.ndim(H) == 2, "Shape of H must be (dim_z, {}), but is {}".format(
1294
+ P.shape[0], np.shape(H)
1295
+ )
1296
+
1297
+ assert (
1298
+ H.shape[1] == P.shape[0]
1299
+ ), "Shape of H must be (dim_z, {}), but is {}".format(P.shape[0], H.shape)
1300
+
1301
+ # shape of R must be the same as HPH'
1302
+ hph_shape = (H.shape[0], H.shape[0])
1303
+ r_shape = np.shape(R)
1304
+
1305
+ if H.shape[0] == 1:
1306
+ # r can be scalar, 1D, or 2D in this case
1307
+ assert (
1308
+ r_shape == () or r_shape == (1,) or r_shape == (1, 1)
1309
+ ), "R must be scalar or one element array, but is shaped {}".format(r_shape)
1310
+ else:
1311
+ assert r_shape == hph_shape, "shape of R should be {} but it is {}".format(
1312
+ hph_shape, r_shape
1313
+ )
1314
+
1315
+ if z is not None:
1316
+ z_shape = np.shape(z)
1317
+ else:
1318
+ z_shape = (self.dim_z, 1)
1319
+
1320
+ # H@x must have shape of z
1321
+ Hx = np.dot(H, x)
1322
+
1323
+ if z_shape == (): # scalar or np.array(scalar)
1324
+ assert Hx.ndim == 1 or np.shape(Hx) == (
1325
+ 1,
1326
+ 1,
1327
+ ), "shape of z should be {}, not {} for the given H".format(
1328
+ np.shape(Hx), z_shape
1329
+ )
1330
+
1331
+ elif np.shape(Hx) == (1,):
1332
+ assert z_shape[0] == 1, "Shape of z must be {} for the given H".format(
1333
+ np.shape(Hx)
1334
+ )
1335
+
1336
+ else:
1337
+ assert z_shape == np.shape(Hx) or (
1338
+ len(z_shape) == 1 and np.shape(Hx) == (z_shape[0], 1)
1339
+ ), "shape of z should be {}, not {} for the given H".format(
1340
+ np.shape(Hx), z_shape
1341
+ )
1342
+
1343
+ if np.ndim(Hx) > 1 and np.shape(Hx) != (1, 1):
1344
+ assert (
1345
+ np.shape(Hx) == z_shape
1346
+ ), "shape of z should be {} for the given H, but it is {}".format(
1347
+ np.shape(Hx), z_shape
1348
+ )
1349
+
1350
+
1351
def update(x, P, z, R, H=None, return_all=False):
    """
    Add a new measurement (z) to the Kalman filter. If z is None, nothing
    is changed.

    This can handle either the multidimensional or unidimensional case. If
    all parameters are floats instead of arrays the filter will still work,
    and return floats for x, P as the result.

    update(1, 2, 1, 1, 1) # univariate
    update(x, P, 1

    Parameters
    ----------
    x : numpy.array(dim_x, 1), or float
        State estimate vector
    P : numpy.array(dim_x, dim_x), or float
        Covariance matrix
    z : (dim_z, 1): array_like
        measurement for this update. z can be a scalar if dim_z is 1,
        otherwise it must be convertible to a column vector.
    R : numpy.array(dim_z, dim_z), or float
        Measurement noise matrix
    H : numpy.array(dim_x, dim_x), or float, optional
        Measurement function. If not provided, a value of 1 is assumed.
    return_all : bool, default False
        If true, y, K, S, and log_likelihood are returned, otherwise
        only x and P are returned.

    Returns
    -------
    x : numpy.array
        Posterior state estimate vector
    P : numpy.array
        Posterior covariance matrix
    y : numpy.array or scalar
        Residual. Difference between measurement and state in measurement
        space
    K : numpy.array
        Kalman gain
    S : numpy.array
        System uncertainty in measurement space
    log_likelihood : float
        log likelihood of the measurement
    """

    # pylint: disable=bare-except

    if z is None:
        if return_all:
            return x, P, None, None, None, None
        return x, P

    # default measurement function is identity (scalar 1)
    if H is None:
        H = np.array([1])

    if np.isscalar(H):
        H = np.array([H])

    Hx = np.atleast_1d(np.dot(H, x))
    z = reshape_z(z, Hx.shape[0], x.ndim)

    # error (residual) between measurement and prediction
    y = z - Hx

    # project system uncertainty into measurement space
    S = np.dot(np.dot(H, P), H.T) + R

    # map system uncertainty into kalman gain
    try:
        K = np.dot(np.dot(P, H.T), np.linalg.inv(S))
    except Exception:
        # can't invert a 1D array, annoyingly
        K = np.dot(np.dot(P, H.T), 1.0 / S)

    # predict new x with residual scaled by the kalman gain
    x = x + np.dot(K, y)

    # P = (I-KH)P(I-KH)' + KRK'  (Joseph form)
    KH = np.dot(K, H)

    try:
        I_KH = np.eye(KH.shape[0]) - KH
    except Exception:
        # KH is a scalar in the univariate case; no .shape attribute
        I_KH = np.array([1 - KH])
    P = np.dot(np.dot(I_KH, P), I_KH.T) + np.dot(np.dot(K, R), K.T)

    if return_all:
        # compute log likelihood
        log_likelihood = logpdf(z, np.dot(H, x), S)
        return x, P, y, K, S, log_likelihood
    return x, P
1456
+
1457
+
1458
def update_steadystate(x, z, K, H=None):
    """
    Add a new measurement (z) to the Kalman filter assuming a fixed,
    precomputed Kalman gain K. If z is None, x is returned unchanged.

    Parameters
    ----------
    x : numpy.array(dim_x, 1), or float
        State estimate vector
    z : (dim_z, 1): array_like
        measurement for this update. z can be a scalar if dim_z is 1,
        otherwise it must be convertible to a column vector.
    K : numpy.array, or float
        Kalman gain matrix
    H : numpy.array(dim_x, dim_x), or float, optional
        Measurement function. If not provided, a value of 1 is assumed.

    Returns
    -------
    x : numpy.array
        Posterior state estimate vector

    Examples
    --------
    This can handle either the multidimensional or unidimensional case. If
    all parameters are floats instead of arrays the filter will still work,
    and return floats for x, P as the result.

    >>> update_steadystate(1, 2, 1) # univariate
    >>> update_steadystate(x, P, z, H)
    """

    if z is None:
        return x

    # normalize H: default to identity (scalar 1), promote scalars to arrays
    if H is None:
        H = np.array([1])
    elif np.isscalar(H):
        H = np.array([H])

    predicted = np.atleast_1d(np.dot(H, x))
    measurement = reshape_z(z, predicted.shape[0], x.ndim)

    # residual between measurement and prediction, scaled by the fixed gain
    residual = measurement - predicted
    return x + np.dot(K, residual)
1515
+
1516
+
1517
def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.0):
    """
    Predict next state (prior) using the Kalman filter state propagation
    equations: x' = Fx + Bu, P' = alpha^2 FPF' + Q.

    Parameters
    ----------
    x : numpy.array
        State estimate vector
    P : numpy.array
        Covariance matrix
    F : numpy.array()
        State Transition matrix
    Q : numpy.array, Optional
        Process noise matrix
    u : numpy.array, Optional, default 0.
        Control vector. If non-zero, it is multiplied by B
        to create the control input into the system.
    B : numpy.array, optional, default 0.
        Control transition matrix.
    alpha : float, Optional, default=1.0
        Fading memory setting. 1.0 gives the normal Kalman filter, and
        values slightly larger than 1.0 (such as 1.02) give a fading
        memory effect - previous measurements have less influence on the
        filter's estimates. This formulation is due to Dan Simon.

    Returns
    -------
    x : numpy.array
        Prior state estimate vector
    P : numpy.array
        Prior covariance matrix
    """

    # promote a scalar transition to an array so np.dot broadcasts correctly
    transition = np.array(F) if np.isscalar(F) else F

    prior_x = np.dot(transition, x) + np.dot(B, u)
    prior_P = (alpha * alpha) * np.dot(np.dot(transition, P), transition.T) + Q

    return prior_x, prior_P
1568
+
1569
+
1570
def predict_steadystate(x, F=1, u=0, B=1):
    """
    Predict next state (prior) using the Kalman filter state propagation
    equations (x' = Fx + Bu). This steady state form only computes x,
    assuming that the covariance is constant.

    Parameters
    ----------
    x : numpy.array
        State estimate vector
    F : numpy.array()
        State Transition matrix
    u : numpy.array, Optional, default 0.
        Control vector. If non-zero, it is multiplied by B
        to create the control input into the system.
    B : numpy.array, optional, default 0.
        Control transition matrix.

    Returns
    -------
    x : numpy.array
        Prior state estimate vector
    """

    # promote a scalar transition to an array so np.dot broadcasts correctly
    transition = np.array(F) if np.isscalar(F) else F
    return np.dot(transition, x) + np.dot(B, u)
1607
+
1608
+
1609
def batch_filter(
    x, P, zs, Fs, Qs, Hs, Rs, Bs=None, us=None, update_first=False, saver=None
):
    """
    Batch processes a sequences of measurements with the procedural
    update()/predict() functions, starting from state x and covariance P.

    Parameters
    ----------
    zs : list-like
        list of measurements at each time step. Missing measurements must be
        represented by None.
    Fs : list-like
        list of values to use for the state transition matrix matrix.
    Qs : list-like
        list of values to use for the process error covariance.
    Hs : list-like
        list of values to use for the measurement matrix.
    Rs : list-like
        list of values to use for the measurement error covariance.
    Bs : list-like, optional
        list of values to use for the control transition matrix;
        a value of None in any position will cause the filter
        to use `self.B` for that time step.
    us : list-like, optional
        list of values to use for the control input vector;
        a value of None in any position will cause the filter to use
        0 for that time step.
    update_first : bool, optional
        controls whether the order of operations is update followed by
        predict, or predict followed by update. Default is predict->update.
    saver : filterpy.common.Saver, optional
        filterpy.common.Saver object. If provided, saver.save() will be
        called after every epoch

    Returns
    -------
    means : np.array((n,dim_x,1))
        array of the state for each time step after the update.
    covariance : np.array((n,dim_x,dim_x))
        array of the covariances for each time step after the update.
    means_predictions : np.array((n,dim_x,1))
        array of the state for each time step after the predictions.
    covariance_predictions : np.array((n,dim_x,dim_x))
        array of the covariances for each time step after the prediction.

    Examples
    --------
    .. code-block:: Python

        zs = [t + random.randn()*4 for t in range (40)]
        Fs = [kf.F for t in range (40)]
        Hs = [kf.H for t in range (40)]

        (mu, cov, _, _) = kf.batch_filter(zs, Rs=R_list, Fs=Fs, Hs=Hs, Qs=None,
                                          Bs=None, us=None, update_first=False)
        (xs, Ps, Ks) = kf.rts_smoother(mu, cov, Fs=Fs, Qs=None)
    """

    n = np.size(zs, 0)
    dim_x = x.shape[0]

    # mean estimates from Kalman Filter; output dimensionality mirrors
    # whether x is stored as a 1D array or a column vector
    if x.ndim == 1:
        means = np.zeros((n, dim_x))
        means_p = np.zeros((n, dim_x))
    else:
        means = np.zeros((n, dim_x, 1))
        means_p = np.zeros((n, dim_x, 1))

    # state covariances from Kalman Filter
    covariances = np.zeros((n, dim_x, dim_x))
    covariances_p = np.zeros((n, dim_x, dim_x))

    # NOTE(review): when us is None, Bs is also overwritten with zeros even
    # if the caller supplied Bs. B has no effect without a control input so
    # the result is unchanged, but confirm before relying on Bs alone.
    if us is None:
        us = [0.0] * n
        Bs = [0.0] * n

    if update_first:
        for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):
            x, P = update(x, P, z, R=R, H=H)
            means[i, :] = x
            covariances[i, :, :] = P

            x, P = predict(x, P, u=u, B=B, F=F, Q=Q)
            means_p[i, :] = x
            covariances_p[i, :, :] = P
            if saver is not None:
                saver.save()
    else:
        for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):
            x, P = predict(x, P, u=u, B=B, F=F, Q=Q)
            means_p[i, :] = x
            covariances_p[i, :, :] = P

            x, P = update(x, P, z, R=R, H=H)
            means[i, :] = x
            covariances[i, :, :] = P
            if saver is not None:
                saver.save()

    return (means, covariances, means_p, covariances_p)
1733
+
1734
+
1735
def rts_smoother(Xs, Ps, Fs, Qs):
    """
    Runs the Rauch-Tung-Striebel Kalman smoother on a set of means and
    covariances computed by a Kalman filter, typically the output of
    `KalmanFilter.batch_filter()`.

    Parameters
    ----------
    Xs : numpy.array
        array of the means (state variable x) of the output of a Kalman
        filter.
    Ps : numpy.array
        array of the covariances of the output of a kalman filter.
    Fs : list-like collection of numpy.array
        State transition matrix of the Kalman filter at each time step.
    Qs : list-like collection of numpy.array, optional
        Process noise of the Kalman filter at each time step.

    Returns
    -------
    x : numpy.ndarray
        smoothed means
    P : numpy.ndarray
        smoothed state covariances
    K : numpy.ndarray
        smoother gain at each step
    pP : numpy.ndarray
        predicted state covariances

    Examples
    --------
    .. code-block:: Python

        zs = [t + random.randn()*4 for t in range (40)]

        (mu, cov, _, _) = kalman.batch_filter(zs)
        (x, P, K, pP) = rts_smoother(mu, cov, kf.F, kf.Q)
    """

    if len(Xs) != len(Ps):
        raise ValueError("length of Xs and Ps must be the same")

    n, dim_x = Xs.shape[0], Xs.shape[1]

    # smoother gain per step (last entry stays zero: nothing to fold in)
    K = np.zeros((n, dim_x, dim_x))
    x, P, pP = Xs.copy(), Ps.copy(), Ps.copy()

    # backward pass: fold the already-smoothed estimate at k+1 into step k
    for k in reversed(range(n - 1)):
        # covariance predicted forward from step k
        pP[k] = np.dot(np.dot(Fs[k], P[k]), Fs[k].T) + Qs[k]

        K[k] = np.dot(np.dot(P[k], Fs[k].T), np.linalg.inv(pP[k]))
        x[k] += np.dot(K[k], x[k + 1] - np.dot(Fs[k], x[k]))
        P[k] += np.dot(np.dot(K[k], P[k + 1] - pP[k]), K[k].T)

    return (x, P, K, pP)
1802
+
1803
+
1804
def reshape_z(z, dim_z, ndim):
    """ensure z is a (dim_z, 1) shaped vector, then squeeze to match ndim"""

    z = np.atleast_2d(z)
    # a row vector of length dim_z is transposed into a column vector
    if z.shape[1] == dim_z:
        z = z.T

    if z.shape != (dim_z, 1):
        raise ValueError("z must be convertible to shape ({}, 1)".format(dim_z))

    # collapse down to the dimensionality the caller's x uses
    if ndim == 1:
        return z[:, 0]
    if ndim == 0:
        return z[0, 0]
    return z
1821
+
1822
+
1823
def pretty_str(label, arr):
    """
    Generates a pretty printed NumPy array with an assignment. Optionally
    transposes column vectors so they are drawn on one line. Strictly speaking
    arr can be any time convertible by `str(arr)`, but the output may not
    be what you want if the type of the variable is not a scalar or an
    ndarray.

    Examples
    --------
    >>> pprint('cov', np.array([[4., .1], [.1, 5]]))
    cov = [[4. 0.1]
           [0.1 5. ]]

    >>> print(pretty_str('x', np.array([[1], [2], [3]])))
    x = [[1 2 3]].T
    """

    def is_col(a):
        """return true if a is a column vector"""
        try:
            return a.shape[0] > 1 and a.shape[1] == 1
        except (AttributeError, IndexError):
            return False

    prefix = "" if label is None else label
    if prefix:
        prefix += " = "

    # column vectors are printed transposed on a single line
    if is_col(arr):
        return prefix + str(arr.T).replace("\n", "") + ".T"

    rows = str(arr).split("\n")
    if not rows:
        return ""

    # continuation lines are padded so the array columns stay aligned
    pad = " " * len(prefix)
    pieces = [prefix + rows[0]]
    pieces.extend(pad + line for line in rows[1:])
    return "\n".join(pieces)
1867
+
1868
+
1869
def logpdf(x, mean=None, cov=1, allow_singular=True):
    """
    Computes the log of the probability density function of the normal
    N(mean, cov) for the data x. The normal may be univariate or multivariate.

    Wrapper for older versions of scipy.multivariate_normal.logpdf which
    don't support the allow_singular keyword prior to version 0.15.0.

    If it is not supported, and cov is singular or not PSD you may get
    an exception.

    `x` and `mean` may be column vectors, row vectors, or lists.
    """

    # flatten inputs so column/row vectors and lists are all accepted
    flat_x = np.asarray(x).flatten()
    flat_mean = None if mean is None else np.asarray(mean).flatten()

    return multivariate_normal.logpdf(flat_x, flat_mean, cov, allow_singular)