c4dynamics 2.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. c4dynamics/__init__.py +240 -0
  2. c4dynamics/datasets/__init__.py +95 -0
  3. c4dynamics/datasets/_manager.py +596 -0
  4. c4dynamics/datasets/_registry.py +80 -0
  5. c4dynamics/detectors/__init__.py +37 -0
  6. c4dynamics/detectors/yolo3_opencv.py +686 -0
  7. c4dynamics/detectors/yolo3_tf.py +124 -0
  8. c4dynamics/eqm/__init__.py +324 -0
  9. c4dynamics/eqm/derivs.py +212 -0
  10. c4dynamics/eqm/integrate.py +359 -0
  11. c4dynamics/filters/__init__.py +1373 -0
  12. c4dynamics/filters/a.py +48 -0
  13. c4dynamics/filters/ekf.py +320 -0
  14. c4dynamics/filters/kalman.py +725 -0
  15. c4dynamics/filters/kalman_v0.py +1071 -0
  16. c4dynamics/filters/kalman_v1.py +821 -0
  17. c4dynamics/filters/lowpass.py +123 -0
  18. c4dynamics/filters/luenberger.py +97 -0
  19. c4dynamics/rotmat/__init__.py +141 -0
  20. c4dynamics/rotmat/animate.py +465 -0
  21. c4dynamics/rotmat/rotmat.py +351 -0
  22. c4dynamics/sensors/__init__.py +72 -0
  23. c4dynamics/sensors/lineofsight.py +78 -0
  24. c4dynamics/sensors/radar.py +740 -0
  25. c4dynamics/sensors/seeker.py +1030 -0
  26. c4dynamics/states/__init__.py +327 -0
  27. c4dynamics/states/lib/__init__.py +320 -0
  28. c4dynamics/states/lib/datapoint.py +660 -0
  29. c4dynamics/states/lib/pixelpoint.py +776 -0
  30. c4dynamics/states/lib/rigidbody.py +677 -0
  31. c4dynamics/states/state.py +1486 -0
  32. c4dynamics/utils/__init__.py +44 -0
  33. c4dynamics/utils/_struct.py +6 -0
  34. c4dynamics/utils/const.py +130 -0
  35. c4dynamics/utils/cprint.py +80 -0
  36. c4dynamics/utils/gen_gif.py +142 -0
  37. c4dynamics/utils/idx2keys.py +4 -0
  38. c4dynamics/utils/images_loader.py +63 -0
  39. c4dynamics/utils/math.py +136 -0
  40. c4dynamics/utils/plottools.py +140 -0
  41. c4dynamics/utils/plottracks.py +304 -0
  42. c4dynamics/utils/printpts.py +36 -0
  43. c4dynamics/utils/slides_gen.py +64 -0
  44. c4dynamics/utils/tictoc.py +167 -0
  45. c4dynamics/utils/video_gen.py +300 -0
  46. c4dynamics/utils/vidgen.py +182 -0
  47. c4dynamics-2.0.3.dist-info/METADATA +242 -0
  48. c4dynamics-2.0.3.dist-info/RECORD +50 -0
  49. c4dynamics-2.0.3.dist-info/WHEEL +5 -0
  50. c4dynamics-2.0.3.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1373 @@
1
+ """
2
+
3
+ This page is an `introduction` to the filters module.
4
+ For the different filter objects themselves, go to :ref:`filters-header`.
5
+
6
+
7
+
8
+ The :mod:`filters <c4dynamics.filters>` module in `c4dynamics` provides a collection of
9
+ classes and functions for implementing various types of filters commonly used in control
10
+ systems and state estimation.
11
+
12
+
13
+
14
+ *******************
15
+ Background Material
16
+ *******************
17
+
18
+ The background material and the sections concerning the particular filters
19
+ are based on sources in references [AG]_ [SD]_ [ZP]_.
20
+
21
+ System Model
22
+ ============
23
+
24
+ State-space representation is a mathematical model of a physical system represented
25
+ as a set of input, output, and state variables related by first-order differential
26
+ (or difference) equations.
27
+
28
+ The state vector :math:`X` of a state-space model provides a snapshot of the system's current condition,
29
+ encapsulating all the variables necessary to describe its future behavior given the input.
30
+ (In `c4dynamics` the state vector is a fundamental data structure,
31
+ represented by the class :class:`state <c4dynamics.states.state.state>`,
32
+ and a snapshot of its values is provided by
33
+ the property :attr:`state.X <c4dynamics.states.state.state.X>`).
34
+
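+ For instance, a minimal sketch (using only the `state` constructor and the `X`
+ property that are demonstrated later in this page) of taking a snapshot of a
+ two-variable state:
+
+ .. code::
+
+ >>> import c4dynamics as c4d
+ >>> s = c4d.state(x1 = 0, x2 = 0)   # a state object of two variables
+ >>> x0 = s.X                        # snapshot of the current values of x1, x2
+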
35
+ When the coefficients of the state variables in the equations are constant, the
36
+ state model represents a linear system (LTI, linear time invariant).
37
+ If the coefficients are
38
+ functions of time, the system is considered linear time-varying.
39
+ Otherwise, the system is nonlinear.
40
+
41
+
42
+ Nonlinear Systems
43
+ =================
44
+
45
+ All systems are naturally nonlinear. When an equilibrium point
46
+ representing the system's main operating region can be found, then a
47
+ linearization is performed about this point, and the system is regarded
48
+ as linear.
49
+ When such a point cannot be easily found, more advanced approaches
50
+ have to be considered to analyze and manipulate the system. One such
51
+ an approach is the extended Kalman filter.
52
+
53
+ A nonlinear system is described by:
54
+
55
+ .. math::
56
+ :label: nonlinearmodel
57
+
58
+ \\dot{x}(t) = f(x, u) + \\omega
59
+
60
+ y(t) = h(x) + \\nu
61
+
62
+ x(0) = x_0
63
+
64
+
65
+
66
+ Where:
67
+
68
+ - :math:`f(\\cdot)` is an arbitrary vector-valued function representing the system process
69
+ - :math:`x` is the system state vector
70
+ - :math:`t` is a time variable
71
+ - :math:`u` is the system input signal
72
+ - :math:`\\omega` is the process uncertainty with covariance matrix :math:`Q_c`
73
+ - :math:`y` is the system output vector (the measure)
74
+ - :math:`h(\\cdot)` is an arbitrary vector-valued function representing the measurement equations
75
+ - :math:`\\nu` is the measure noise with covariance matrix :math:`R_c`
76
+ - :math:`x_0` is a vector of initial conditions
77
+
78
+ The noise processes :math:`\\omega(t)` and :math:`\\nu(t)` are white, zero-mean, uncorrelated,
79
+ and have known covariance matrices :math:`Q_c` and :math:`R_c`, respectively:
80
+
81
+ .. math::
82
+
83
+ \\omega(t) \\sim (0, Q_c)
84
+
85
+ \\nu(t) \\sim (0, R_c)
86
+
87
+ E[\\omega(t) \\cdot \\omega^T(t)] = Q_c \\cdot \\delta(t)
88
+
89
+ E[\\nu(t) \\cdot \\nu^T(t)] = R_c \\cdot \\delta(t)
90
+
91
+ E[\\nu(t) \\cdot \\omega^T(t)] = 0
92
+
93
+
94
+
95
+ Where:
96
+
97
+ - :math:`\\omega` is the process uncertainty with covariance matrix :math:`Q_c`
98
+ - :math:`\\nu` is the measure noise with covariance matrix :math:`R_c`
99
+ - :math:`Q_c` is the process covariance matrix
100
+ - :math:`R_c` is the measurement covariance matrix
101
+ - :math:`\\sim` is the distribution operator. :math:`\\sim (\\mu, \\sigma)` means a normal distribution with mean :math:`\\mu` and standard deviation :math:`\\sigma`
102
+ - :math:`E(\\cdot)` is the expectation operator
103
+ - :math:`\\delta(\\cdot)` is the Dirac delta function (:math:`\\delta(t) = \\infty` if :math:`t = 0`, and :math:`\\delta(t) = 0` if :math:`t \\neq 0`)
104
+ - superscript T is the transpose operator
105
+
106
+
107
+
108
+ Linearization
109
+ =============
110
+
111
+ A linear Kalman filter has a significant advantage in terms of simplicity and
112
+ computing resources, but much more importantly, the `System Covariance`_
113
+ in a linear Kalman filter provides exact predictions of the errors in the state estimates.
114
+ The extended Kalman filter offers no such guarantees.
115
+ Therefore it is always good practice to start with
116
+ an attempt to linearize the system.
117
+
118
+ The linearized model of system :eq:`nonlinearmodel` around a nominal trajectory :math:`x_n` is given by [MZ]_:
119
+
120
+
121
+ .. math::
122
+ :label: linearizedmodel
123
+
124
+ \\dot{x} = {\\partial{f} \\over \\partial{x}}\\bigg|_{x_n, u_n} \\cdot \\Delta{x}
+ + {\\partial{f} \\over \\partial{u}}\\bigg|_{x_n, u_n} \\cdot \\Delta{u} + \\omega
+
+ y = {\\partial{h} \\over \\partial{x}}\\bigg|_{x_n} \\cdot \\Delta{x} + \\nu
128
+
129
+ \\\\
130
+
131
+ x(0) = x_0
132
+
133
+
134
+ Where:
135
+
136
+ - :math:`\\Delta{x}` is the linear approximation of a small deviation of the state :math:`x` from the nominal trajectory
137
+ - :math:`\\Delta{u}` is the linear approximation of a small deviation of the input control :math:`u` from the nominal trajectory
138
+ - :math:`\\omega` is the process uncertainty
139
+ - :math:`\\nu` is the measure noise with covariance matrix :math:`R_c`
140
+ - :math:`{\\partial{f} \\over \\partial{i}}\\bigg|_{x_n, u_n}` is the partial derivative of :math:`f` with respect to :math:`i (i = x` or :math:`u)` substituted by the nominal point :math:`{x_n, u_n}`
141
+ - :math:`{\\partial{h} \\over \\partial{x}}\\bigg|_{x_n}` is the partial derivative of :math:`h` with respect to :math:`x`, substituted by the nominal point :math:`{x_n}`
142
+ - :math:`y` is the system output vector (the measure)
143
+ - :math:`x_0` is a vector of initial conditions
144
+
145
+
146
+
147
+ Let's denote:
148
+
149
+ .. math::
150
+
151
+ A = {\\partial{f} \\over \\partial{x}}\\bigg|_{x_n, u_n, \\omega_n}
152
+
153
+ B = {\\partial{f} \\over \\partial{u}}\\bigg|_{x_n, u_n, \\omega_n}
154
+
155
+ C = {\\partial{h} \\over \\partial{x}}\\bigg|_{x_n, \\nu_n}
156
+
157
+
158
+
159
+ Finally the linear model of system :eq:`nonlinearmodel` is:
160
+
161
+ .. math::
162
+ :label: linearmodel
163
+
164
+ \\dot{x} = A \\cdot x + B \\cdot u + \\omega
165
+
166
+ y = C \\cdot x + \\nu
167
+
168
+ x(0) = x_0
169
+
170
+ Where:
171
+
172
+ - :math:`A` is the process dynamics matrix
173
+ - :math:`x` is the system state vector
174
+ - :math:`B` is the process input matrix
175
+ - :math:`u` is the system input signal
176
+ - :math:`\\omega` is the process uncertainty with covariance matrix :math:`Q_c`
177
+ - :math:`y` is the system output vector (the measure)
178
+ - :math:`C` is the output matrix
179
+ - :math:`\\nu` is the measure noise with covariance matrix :math:`R_c`
180
+ - :math:`x_0` is a vector of initial conditions
181
+ - :math:`Q_c` is the process covariance matrix
182
+ - :math:`R_c` is the measurement covariance matrix
183
+
184
+
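+ As a brief illustration (plain `numpy` only, with an assumed pendulum-like process
+ that is not part of the library), the Jacobian :math:`A` can also be approximated
+ numerically by central differences about the nominal point:
+
+ .. code::
+
+ >>> import numpy as np
+ >>> def f(x, u):   # assumed nonlinear process: x = [angle, angular rate]
+ ...   return np.array([x[1], -9.8 * np.sin(x[0]) - 0.5 * x[1] + u])
+ >>> xn, un, eps = np.array([0.0, 0.0]), 0.0, 1e-6   # nominal point and perturbation size
+ >>> A = np.zeros((2, 2))
+ >>> for i in range(2):
+ ...   dx = np.zeros(2)
+ ...   dx[i] = eps
+ ...   A[:, i] = (f(xn + dx, un) - f(xn - dx, un)) / (2 * eps)   # i-th column of df/dx
+ >>> # here A is approximately [[0, 1], [-9.8, -0.5]]
+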
185
+ Sampled Systems
186
+ ===============
187
+
188
+ The nonlinear system :eq:`nonlinearmodel` and its linearized form :eq:`linearmodel`
189
+ are given in the continuous-time domain, which is the progressive manifestation of any physical system.
190
+ However, the output of a system is usually sampled by digital devices in discrete time instances.
191
+
192
+ Hence, in sampled-data systems the dynamics is described by a continuous-time differential equation,
193
+ but the output only changes at discrete time instants.
194
+
195
+ Nonetheless, for numerical considerations the Kalman filter equations are usually given in the discrete-time domain
196
+ not only at the stage of measure updates (`update` or `correct`) but also at the stage of the dynamics propagation (`predict`).
197
+
198
+ The discrete-time form of system :eq:`linearmodel` is given by:
199
+
200
+ .. math::
201
+ :label: discretemodel
202
+
203
+ x_k = F \\cdot x_{k-1} + G \\cdot u_{k-1} + \\omega_{k-1}
204
+
205
+ y_k = H \\cdot x_k + \\nu_k
206
+
207
+ x_{k=0} = x_0
208
+
209
+ Where:
210
+
211
+ - :math:`x_k` is the discretized system state vector
212
+ - :math:`F` is the discretized process dynamics matrix (actually a first order approximation of the state transition matrix :math:`\\Phi`)
213
+ - :math:`G` is the discretized process input matrix
214
+ - :math:`u` is the discretized process input signal
215
+ - :math:`\\omega_k` is the process uncertainty with covariance matrix :math:`Q_k`
216
+ - :math:`y_k` is the discretized system output vector (the measurement)
217
+ - :math:`H` is the discrete measurement matrix
218
+ - :math:`\\nu_k` is the measure noise with covariance matrix :math:`R_k`
219
+ - :math:`x_0` is a vector of initial conditions
220
+
221
+
222
+ The noise processes :math:`\\omega_{k}` and :math:`\\nu_k` are white, zero-mean, uncorrelated,
223
+ and have known covariance matrices :math:`Q_k` and :math:`R_k`, respectively:
224
+
225
+ .. math::
226
+
227
+ \\omega_k \\sim (0, Q_k)
228
+
229
+ \\nu_k \\sim (0, R_k)
230
+
231
+ E[\\omega_k \\cdot \\omega^T_j] = Q_k \\cdot \\delta_{k-j}
232
+
233
+ E[\\nu_k \\cdot \\nu^T_j] = R_k \\cdot \\delta_{k-j}
234
+
235
+ E[\\nu_k \\cdot \\omega^T_j] = 0
236
+
237
+
238
+
239
+ The discretization of a system is based on the state-transition matrix :math:`\\Phi(t)`.
240
+ For a matrix :math:`A` the state transition matrix :math:`\\Phi(t)` is given by the matrix exponential :math:`\\Phi = e^{A \\cdot t}`
241
+ which can be expanded as a power series.
242
+
243
+ An approximate representation of a continuous-time
244
+ system by a series expansion up to the first-order is given by:
245
+
246
+ .. math::
247
+
248
+ F = I + A \\cdot dt
249
+
250
+ G = B \\cdot dt
251
+
252
+ Q = Q_c \\cdot dt
253
+
254
+ R = R_c / dt
255
+
256
+
257
+ Where:
+
+ - :math:`F` is the discretized process dynamics matrix (actually a first order approximation of the state transition matrix :math:`\\Phi`)
+ - :math:`G` is the discretized process input matrix
268
+ - :math:`I` is the identity matrix
269
+ - :math:`dt` is the sampling time
270
+ - :math:`\\sim` is the distribution operator. :math:`\\sim (\\mu, \\sigma)` means a normal distribution with mean :math:`\\mu` and standard deviation :math:`\\sigma`
271
+ - :math:`E(\\cdot)` is the expectation operator
272
+ - :math:`\\delta(\\cdot)` is the Kronecker delta function (:math:`\\delta_{k-j} = 1` if :math:`k = j`, and :math:`\\delta_{k-j} = 0` if :math:`k \\neq j`)
273
+ - superscript T is the transpose operator
274
+ - :math:`Q` is the process covariance matrix
275
+ - :math:`R` is the measurement covariance matrix
276
+ - :math:`A, B, Q_c, R_c` are the continuous-time system variables of the system state matrix, system input vector, process covariance matrix, and measurement covariance matrix, respectively
277
+
278
+
279
+
280
+
281
+ Note that the covariance matrices may have been converted from
282
+ the continuous-time system to discrete-time.
283
+ However, in most cases, these parameters are determined through experimentation
284
+ with the system in its final form.
285
+
286
+ Additionally, measurements are sampled by digital devices at discrete time steps,
287
+ and the noise properties are typically provided in that form.
288
+ However, if the process noise applies to a kinematic system where the noise properties
289
+ are specified in continuous terms, the above approximation can be used, or
+ the more exact expression for the continuous white-noise model,
+ :math:`Q = \\int_{0}^{dt} F \\cdot Q_c \\cdot F^T \\, dt`.
292
+
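+ For illustration, a short `numpy`/`scipy` sketch of these relations for an assumed
+ second-order continuous model (the exact transition matrix `expm(A * dt)` is computed
+ only for comparison with the first-order approximation):
+
+ .. code::
+
+ >>> import numpy as np
+ >>> from scipy.linalg import expm
+ >>> dt = 0.01                          # sampling time
+ >>> A = np.array([[0, 1], [0, -0.5]])  # assumed continuous dynamics matrix
+ >>> B = np.array([[0], [1]])           # assumed continuous input matrix
+ >>> Qc = np.diag([0, 0.1**2])          # assumed continuous process noise spectral density
+ >>> Rc = 0.5**2                        # assumed continuous measurement noise intensity
+ >>> F = np.eye(2) + A * dt             # first-order approximation of the transition matrix
+ >>> G = B * dt
+ >>> Q, R = Qc * dt, Rc / dt
+ >>> Fexact = expm(A * dt)              # exact transition matrix, for comparison
+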
293
+
294
+
295
+
296
+
297
+
298
+
299
+ System Covariance
300
+ =================
301
+
302
+ Before getting into the Kalman filter itself, it is necessary to consider one more concept,
303
+ that is, the system covariance.
304
+
305
+ Usually denoted by :math:`P`, this variable represents the current uncertainty of the estimate.
306
+
307
+ :math:`P` is a matrix that quantifies the estimated accuracy of the state variables,
308
+ with its diagonal elements indicating the variance of each state variable,
309
+ and the off-diagonal elements representing the covariances between different state variables.
310
+
311
+ :math:`P` is iteratively refined through the `predict` and the `update` steps. Its
312
+ initial state, :math:`P_0`,
313
+ is chosen based on prior knowledge to reflect the confidence in the initial state estimate (:math:`x_0`).
314
+
315
+
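+ For example, if the initial estimate of a two-variable state is believed to be accurate
+ to within assumed one-sigma errors :math:`\\sigma_1` and :math:`\\sigma_2`, a reasonable
+ choice is a diagonal :math:`P_0` carrying the squared errors (variances):
+
+ .. code::
+
+ >>> import numpy as np
+ >>> sigma1, sigma2 = 10, 0.01              # assumed 1-sigma errors of the initial estimate
+ >>> P0 = np.diag([sigma1**2, sigma2**2])   # variances on the diagonal, zero cross-covariance
+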
316
+
317
+
318
+
319
+ ******************************************************************
320
+ Kalman Filter (:class:`kalman <c4dynamics.filters.kalman.kalman>`)
321
+ ******************************************************************
322
+
323
+ A simple way to design a Kalman filter is to separate it into two steps: `predict` and `update` (sometimes called `correct`).
324
+ The `predict` step is used to project the estimate forward in time.
325
+ The `update` corrects the prediction by using a new measure.
326
+
327
+ Predict
328
+ =======
329
+
330
+ In the prediction step the current estimate is projected forward in time to
331
+ obtain a predicted estimate using the system model.
332
+
333
+ The current state estimate, :math:`x`, is projected into the future using the known system dynamics :eq:`discretemodel`.
334
+ The uncertainty associated with the predicted state, :math:`P`, is calculated by projecting the
335
+ current error covariance forward in time.
336
+
337
+ Since the `predict` equations are calculated before a measure is taken (a priori), the new state :math:`x` and the new covariance :math:`P`
338
+ are denoted by a :math:`(-)` superscript.
339
+
340
+ .. math::
341
+
342
+ x_k^- = F \\cdot x_{k-1}^+ + G \\cdot u_{k-1}
343
+
344
+ P_k^- = F \\cdot P_{k-1}^+ \\cdot F^T + Q
345
+
346
+ x_0^+ = x_0
347
+
348
+ P_0^+ = E[x_0 \\cdot x_0^T]
349
+
350
+ Where:
351
+
352
+ - :math:`x_k^-` is the estimate of the system state, :math:`x_k`, before a measurement update.
353
+ - :math:`F` is the discretized process dynamics matrix
354
+ - :math:`G` is the discretized process input matrix
355
+ - :math:`u_k` is the process input signal
356
+ - :math:`P_k^-` is the estimate of the system covariance matrix, :math:`P_k`, before a measurement update
357
+ - :math:`P_{k-1}^+` is the system covariance matrix estimate, :math:`P_k`, from previous measurement update
358
+ - :math:`Q` is the process covariance matrix
359
+ - :math:`R` is the measurement covariance matrix
360
+ - superscript T is the transpose operator
361
+ - :math:`x_0` is the initial state estimation
362
+ - :math:`P_0` is the covariance matrix consisting of errors of the initial estimation
363
+
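+ A minimal `numpy` sketch of a single predict step over these equations
+ (all the matrices here are arbitrary placeholders, for illustration only):
+
+ .. code::
+
+ >>> import numpy as np
+ >>> dt = 0.01
+ >>> F = np.array([[1, dt], [0, 1]])    # placeholder discrete dynamics matrix
+ >>> G = np.array([[0], [dt]])          # placeholder discrete input matrix
+ >>> Q = np.diag([1e-6, 1e-4])          # placeholder process noise covariance
+ >>> x = np.array([[0.0], [1.0]])       # previous (a posteriori) state estimate
+ >>> P = np.eye(2)                      # previous (a posteriori) covariance
+ >>> u = np.array([[0.5]])              # input signal
+ >>> x_pred = F @ x + G @ u             # a priori state estimate
+ >>> P_pred = F @ P @ F.T + Q           # a priori covariance
+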
364
+
365
+ Update
366
+ ======
367
+
368
+ In the update step (also called `correct`), the estimate is corrected by using a new measure.
369
+
370
+ The Kalman gain, :math:`K`, is computed based on the predicted error covariance and the measurement noise.
371
+ It determines the optimal weighting between the predicted state and the new measurement.
372
+
373
+ The predicted state estimate is adjusted using the new measurement, weighted by the Kalman Gain.
374
+ This update incorporates the latest measurement to refine the state estimate.
375
+ Then the error covariance is updated to reflect the reduced uncertainty after incorporating the new measurement.
376
+
377
+
378
+ The `update` equations are calculated after a measure is taken (a posteriori), and the new state :math:`x` and the new covariance :math:`P`
379
+ are denoted by a :math:`(+)` superscript.
380
+
381
+ .. math::
382
+
383
+ K = P_k^- \\cdot H^T \\cdot (H \\cdot P_k^- \\cdot H^T + R)^{-1}
384
+
385
+ x_k^+ = x_k^- + K \\cdot (y - H \\cdot x_k^-)
386
+
387
+ P_k^+ = (I - K \\cdot H) \\cdot P_k^-
388
+
389
+ Where:
390
+
391
+ - :math:`K` is the Kalman gain
392
+ - :math:`P_k^-` is the estimate of the system covariance matrix, :math:`P_k`, from the previous prediction
393
+ - :math:`H` is the discrete measurement matrix
394
+ - :math:`R` is the measurement covariance matrix
395
+ - :math:`x_k^+` is the estimate of the system state, :math:`x_k`, after a measurement update
396
+ - :math:`x_k^-` is the estimate of the system state, :math:`x_k`, from the previous prediction
397
+ - :math:`y` is the measure
398
+ - :math:`I` is the identity matrix
399
+ - :math:`P_k^+` is the estimate of the system covariance matrix, :math:`P_k`, after a measurement update
400
+ - superscript T is the transpose operator
401
+
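+ A matching sketch of the update step, continuing the variables of the predict sketch
+ above (the measurement matrix, noise covariance, and measure are placeholders):
+
+ .. code::
+
+ >>> H = np.array([[1, 0]])             # placeholder measurement matrix
+ >>> R = np.array([[0.25]])             # placeholder measurement noise covariance
+ >>> y = np.array([[0.02]])             # placeholder measure
+ >>> K = P_pred @ H.T @ np.linalg.inv(H @ P_pred @ H.T + R)   # Kalman gain
+ >>> x_upd = x_pred + K @ (y - H @ x_pred)                    # a posteriori state estimate
+ >>> P_upd = (np.eye(2) - K @ H) @ P_pred                     # a posteriori covariance
+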
402
+
403
+ .. _kalman_implementation:
404
+
405
+ Implementation (C4dynamics)
406
+ ===========================
407
+
408
+ :class:`kalman <c4dynamics.filters.kalman.kalman>`
409
+ is a discrete linear Kalman filter model.
410
+
411
+ Following the concept of separating `predict`
412
+ and `update`, running a Kalman filter is done
413
+ by constructing a Kalman filter with parameters as a
414
+ :class:`state <c4dynamics.states.state.state>` object
415
+ and calling the
416
+ :meth:`predict <c4dynamics.filters.kalman.kalman.predict>`
417
+ and :meth:`update <c4dynamics.filters.kalman.kalman.update>` methods.
418
+
419
+ The Kalman filter in `c4dynamics` is a class.
420
+ Thus, the user constructs an object that holds the
421
+ attributes required to build the estimates.
422
+ This is crucial to understand because when the user
423
+ calls `predict` or `update`,
424
+ the object uses parameters and values from previous calls.
425
+
426
+
427
+ Every filter class in `c4dynamics` is a
428
+ subclass of the state class.
429
+ This means that the filter itself
430
+ encapsulates the estimated state vector:
431
+
432
+ .. code::
433
+
434
+ >>> from c4dynamics.filters import kalman
435
+ >>> import numpy as np
436
+ >>> z = np.zeros((2, 2))
437
+ >>> kf = kalman(X = {'x1': 0, 'x2': 0}, P0 = z, F = z, H = z, Q = z, R = z)
438
+ >>> print(kf)
439
+ [ x1 x2 ]
440
+
441
+ `z` is an arbitrary matrix used
442
+ to initialize a filter of
443
+ two variables (:math:`x_1, x_2`).
444
+
445
+
446
+ It also means that a filter object
447
+ inherits all the mathematical attributes
448
+ (norm, multiplication, etc.)
449
+ and data attributes (storage, plotting, etc.)
450
+ of a state object
451
+ (for further details, see :mod:`states <c4dynamics.states>`,
452
+ the :class:`state <c4dynamics.states.state.state>` class,
+ and refer to the examples below).
454
+
455
+
456
+ Example
457
+ -------
458
+
459
+ An altimeter is measuring the altitude of an aircraft.
460
+ The flight path angle of the aircraft, :math:`\\gamma`, is controlled
+ by a stick which deflects the
+ elevator, which in turn changes the aircraft altitude :math:`z`:
463
+
464
+ .. math::
465
+
466
+ \\dot{z}(t) = 5 \\cdot \\gamma(t) + \\omega_z(t)
467
+
468
+ \\dot{\\gamma}(t) = -0.5 \\cdot \\gamma(t) + 0.1 \\cdot (H_f - u(t)) + \\omega_{\\gamma}(t)
469
+
470
+ y(t) = z(t) + \\nu(t)
471
+
472
+
473
+ Where:
474
+
475
+ - :math:`z` is the deviation of the aircraft from the required altitude
476
+ - :math:`\\gamma` is the flight path angle
477
+ - :math:`H_f` is a constant altitude input required by the pilot
478
+ - :math:`\\omega_z` is the uncertainty in the altitude behavior
479
+ - :math:`\\omega_{\\gamma}` is the uncertainty in the flight path angle behavior
480
+ - :math:`u` is the deflection command
481
+ - :math:`y` is the output measure of `z`
482
+ - :math:`\\nu` is the measure noise
483
+
484
+ The process uncertainties are given by: :math:`\\omega_z \\sim (0, 0.5)[ft],
485
+ \\omega_{\\gamma} \\sim (0, 0.1)[deg]`.
486
+
487
+ Let :math:`H_f`, the altitude required by the pilot, be :math:`H_f = 1kft`.
488
+ The initial conditions are: :math:`z_0 = 1010ft` (error of :math:`10ft`), and :math:`\\gamma_0 = 0`.
489
+
490
+ The altimeter samples at a rate of :math:`50Hz` (:math:`dt = 20msec`)
491
+ with measure noise of :math:`\\nu \\sim (0, 0.5)[ft]`.
492
+
493
+
494
+
495
+ A Kalman filter shall reduce the noise and estimate the state variables.
496
+ But first it must be verified that the system is observable; otherwise the filter cannot
497
+ fully estimate the state variables based on the output measurements.
498
+
499
+
500
+ **Setup**
501
+
502
+
503
+ Import required packages:
504
+
505
+ .. code::
506
+
507
+ >>> from c4dynamics.filters import kalman
508
+ >>> from matplotlib import pyplot as plt
509
+ >>> from scipy.integrate import odeint
510
+ >>> import c4dynamics as c4d
511
+ >>> import numpy as np
512
+
513
+
514
+ Define system matrices:
515
+
516
+ .. code::
517
+
518
+ >>> A = np.array([[0, 5], [0, -0.5]])
519
+ >>> B = np.array([0, 0.1])
520
+ >>> C = np.array([1, 0])
521
+
522
+ Observability test:
523
+
524
+ .. code::
525
+
526
+ >>> n = A.shape[0]
527
+ >>> obsv = C
528
+ >>> for i in range(1, n):
529
+ ... obsv = np.vstack((obsv, C @ np.linalg.matrix_power(A, i)))
530
+ >>> rank = np.linalg.matrix_rank(obsv)
531
+ >>> print(f'The system is observable (rank = n = {n}).' if rank == n else f'The system is not observable (rank = {rank}, n = {n}).')
532
+ The system is observable (rank = n = 2).
533
+
534
+
535
+ Some constants and initialization of the scene:
536
+
537
+ .. code::
538
+
539
+ >>> dt, tf = 0.01, 50
540
+ >>> tspan = np.arange(0, tf + dt, dt)
541
+ >>> Hf = 1000
542
+ >>> # reference target
543
+ >>> tgt = c4d.state(z = 1010, gamma = 0)
544
+
545
+
546
+ The dynamics is defined by an ODE function to be solved using scipy's ode integration:
547
+
548
+ .. code::
549
+
550
+ >>> def autopilot(y, t, u = 0, w = np.zeros(2)):
551
+ ... return A @ y + B * u + w
552
+
553
+
554
+ **Ideal system**
555
+
556
+ Let's start with a simulation of an ideal system.
557
+ The process has no uncertainties and the radar is free of measurement errors (the `isideal` flag is on):
558
+
559
+ .. code::
560
+
561
+ >>> process_noise = np.zeros((2, 2))
562
+ >>> altmtr = c4d.sensors.radar(isideal = True, dt = 2 * dt)
563
+
564
+ Main loop:
565
+
566
+ .. code::
567
+
568
+ >>> for t in tspan:
569
+ ... tgt.store(t)
570
+ ... _, _, Z = altmtr.measure(tgt, t = t, store = True)
571
+ ... if Z is not None:
572
+ ... tgt.X = odeint(autopilot, tgt.X, [t, t + dt], args = (Hf - Z, process_noise @ np.random.randn(2)))[-1]
573
+
574
+
575
+ The loop advances the target variables according to the `autopilot` (accurate) dynamics
576
+ and the (ideal) measures of the radar.
577
+
578
+ Plot the time histories of the target altitude (:math:`z`) and flight path angle (:math:`\\gamma`):
579
+
580
+ .. code::
581
+
582
+ >>> fig, ax = plt.subplots(1, 2)
583
+ >>> # first axis
584
+ >>> ax[0].plot(*tgt.data('z'), 'm', label = 'true') # doctest: +IGNORE_OUTPUT
585
+ >>> ax[0].plot(*altmtr.data('range'), '.c', label = 'altimeter') # doctest: +IGNORE_OUTPUT
586
+ >>> c4d.plotdefaults(ax[0], 'Altitude', 't', 'ft')
587
+ >>> ax[0].legend() # doctest: +IGNORE_OUTPUT
588
+ >>> # second axis
589
+ >>> ax[1].plot(*tgt.data('gamma', c4d.r2d), 'm') # doctest: +IGNORE_OUTPUT
590
+ >>> c4d.plotdefaults(ax[1], 'Path Angle', 't', '')
591
+ >>> plt.show()
592
+
593
+ .. figure:: /_examples/filters/ap_ideal.png
594
+
595
+ The ideal altimeter measures the aircraft altitude precisely.
596
+ Its samples are used to control the flight path angle. The altitude error, which started
+ at :math:`10ft` above the required
+ altitude (:math:`H_f = 1000ft`), is closed after about :math:`18s`.
599
+
600
+
601
+ **Noisy system**
602
+
603
+ Now, let's introduce the process uncertainty and measurement noise:
604
+
605
+ .. code::
606
+
607
+ >>> process_noise = np.diag([0.5, 0.1 * c4d.d2r])
608
+ >>> measure_noise = 1 # ft
609
+ >>> altmtr = c4d.sensors.radar(rng_noise_std = measure_noise, dt = 2 * dt)
610
+
611
+ Re-running the main loop yields:
612
+
613
+ .. figure:: /_examples/filters/ap_noisy.png
614
+
615
+ Very bad.
616
+ The errors corrupt the input that is used to control the altitude.
+ The point at which the altitude converges to its steady state is more
+ than :math:`10s` later than in the ideal case.
619
+
620
+
621
+ **Filtered system**
622
+
623
+ A Kalman filter should find optimized gains to minimize the mean squared error.
624
+ For the estimated state let's define a new object, `kf`,
+ and initialize it with the estimated initial errors:
626
+
627
+
628
+ .. code::
629
+
630
+ >>> z_err = 5
631
+ >>> gma_err = 1 * c4d.d2r
632
+ >>> tgt = c4d.state(z = 1010, gamma = 0)
633
+ >>> kf = kalman(X = {'z': tgt.z + z_err, 'gamma': tgt.gamma + gma_err}
634
+ ... , P0 = [2 * z_err, 2 * gma_err]
635
+ ... , R = measure_noise**2 / dt, Q = process_noise**2 * dt
636
+ ... , F = np.eye(2) + A * dt, G = B * dt, H = C)
637
+
638
+
639
+
640
+ The main loop is changed to:
641
+
642
+ .. code::
643
+
644
+ >>> for t in tspan:
645
+ ... tgt.store(t)
646
+ ... kf.store(t)
647
+ ... tgt.X = odeint(autopilot, tgt.X, [t, t + dt], args = (Hf - kf.z, process_noise @ np.random.randn(2)))[-1]
648
+ ... kf.predict(u = Hf - kf.z)
649
+ ... _, _, Z = altmtr.measure(tgt, t = t, store = True)
650
+ ... if Z is not None:
651
+ ... kf.update(Z) # doctest: +IGNORE_OUTPUT
652
+
653
+
654
+
655
+ Plot the state estimates over the true target altitude (:math:`z`) and flight path angle (:math:`\\gamma`):
656
+
657
+ .. code::
658
+
659
+ >>> fig, ax = plt.subplots(1, 2)
660
+ >>> # first axis
661
+ >>> ax[0].plot(*tgt.data('z'), 'm', label = 'true') # doctest: +IGNORE_OUTPUT
662
+ >>> ax[0].plot(*altmtr.data('range'), '.c', label = 'altimeter') # doctest: +IGNORE_OUTPUT
663
+ >>> ax[0].plot(*kf.data('z'), 'y', label = 'kf') # doctest: +IGNORE_OUTPUT
664
+ >>> c4d.plotdefaults(ax[0], 'Altitude', 't', 'ft')
665
+ >>> ax[0].legend() # doctest: +IGNORE_OUTPUT
666
+ >>> # second axis
667
+ >>> ax[1].plot(*tgt.data('gamma', c4d.r2d), 'm') # doctest: +IGNORE_OUTPUT
668
+ >>> ax[1].plot(*kf.data('gamma', c4d.r2d), 'y') # doctest: +IGNORE_OUTPUT
669
+ >>> c4d.plotdefaults(ax[1], 'Path Angle', 't', '')
670
+ >>> plt.show()
671
+
672
+ .. figure:: /_examples/filters/ap_filtered.png
673
+
674
+ The filtered altitude (`kf.z`) is used as input to control the system and
675
+ generates results almost as good as in the ideal case.
676
+
677
+ Ultimately, the Kalman filter estimating the aircraft altitude
+ operates through a two-step process: prediction and update.
679
+ In the prediction step, the filter projects the current state estimate
680
+ forward using the system model.
681
+ In the update step, it corrects this prediction with new measurements.
682
+
683
+ Since the Kalman filter is implemented as a class,
+ it is used by creating an instance and then calling its
+ `predict` and `update` methods for state estimation.
686
+
687
+
688
+
689
+
690
+
691
+
692
+
693
+
694
+
695
+
696
+
697
+
698
+
699
+
700
+
701
+ ******************************************************************
702
+ Extended Kalman Filter (:class:`ekf <c4dynamics.filters.ekf.ekf>`)
703
+ ******************************************************************
704
+
705
+ A linear Kalman filter
706
+ (:class:`kalman <c4dynamics.filters.kalman.kalman>`)
707
+ should be the first choice
708
+ when designing a state observer.
709
+ However, when a nominal trajectory cannot be found,
710
+ the solution is to linearize the system
711
+ at each cycle about the current estimated state.
712
+
713
+ Similarly to the linear Kalman filter,
714
+ a good approach to design an extended Kalman filter
715
+ is to separate it into two steps: `predict` and `update` (`correct`).
716
+
717
+ Since the iterative solution to the algebraic Riccati equation
718
+ (used to calculate the optimal covariance matrix :math:`P`) involves
719
+ the matrix representation of the system parameters, the nonlinear equations
720
+ of the process and / or the measurement must be linearized
721
+ before executing each stage of the `ekf`.
722
+
723
+ Nevertheless, the calculation of the state vector :math:`x`
724
+ both in the predict step (projection in time using the process equations)
725
+ and in the update step (correction using the measure equations)
726
+ does not have to use the approximated linear expressions (:math:`F, H`)
727
+ and can use the exact nonlinear equations (:math:`f, h`).
728
+
729
+
730
+ Recall the mathematical model of a nonlinear system as given in :eq:`nonlinearmodel`:
731
+
732
+
733
+ .. math::
734
+
735
+ \\dot{x} = f(x, u) + \\omega
736
+
737
+ y = h(x) + \\nu
738
+
739
+ x(0) = x_0
740
+
741
+
742
+ Where:
743
+
744
+ - :math:`f(\\cdot)` is an arbitrary vector-valued function representing the system dynamics
745
+ - :math:`x` is the system state vector
746
+ - :math:`u` is the process input signal
747
+ - :math:`\\omega` is the process uncertainty with covariance matrix :math:`Q`
748
+ - :math:`y` is the system output vector
749
+ - :math:`h(\\cdot)` is an arbitrary vector-valued function representing the system output
750
+ - :math:`\\nu` is the measure noise with covariance matrix :math:`R`
751
+ - :math:`x_0` is a vector of initial conditions
752
+
753
+ The noise processes :math:`\\omega(t)` and :math:`\\nu(t)` are white, zero-mean, uncorrelated,
754
+ and have known covariance matrices :math:`Q` and :math:`R`, respectively:
755
+
756
+ .. math::
757
+
758
+ \\omega(t) \\sim (0, Q)
759
+
760
+ \\nu(t) \\sim (0, R)
761
+
762
+ E[\\omega(t) \\cdot \\omega^T(t)] = Q \\cdot \\delta(t)
763
+
764
+ E[\\nu(t) \\cdot \\nu^T(t)] = R \\cdot \\delta(t)
765
+
766
+ E[\\nu(t) \\cdot \\omega^T(t)] = 0
767
+
768
+
769
+ Where:
770
+
771
+ - :math:`\\omega` is the process uncertainty with covariance matrix :math:`Q`
772
+ - :math:`\\nu` is the measure noise with covariance matrix :math:`R`
773
+ - :math:`Q` is the process covariance matrix
774
+ - :math:`R` is the measurement covariance matrix
775
+ - :math:`\\sim` is the distribution operator. :math:`\\sim (\\mu, \\sigma)` means a normal distribution with mean :math:`\\mu` and standard deviation :math:`\\sigma`
776
+ - :math:`E(\\cdot)` is the expectation operator
777
+ - :math:`\\delta(\\cdot)` is the Dirac delta function (:math:`\\delta(t) = \\infty` if :math:`t = 0`, and :math:`\\delta(t) = 0` if :math:`t \\neq 0`)
778
+ - superscript T is the transpose operator
779
+
780
+
781
+ The linearized term for :math:`f` is given by its Jacobian with
782
+ respect to :math:`x`:
783
+
784
+ .. math::
785
+
786
+ A = {\\partial{f} \\over \\partial{x}}\\bigg|_{x, u}
787
+
788
+
789
+ Note however that the derivatives are taken at the last estimate
790
+ (as opposed to a nominal trajectory that is used in a global linearization).
791
+
792
+ The linearized term for :math:`h` is given by its Jacobian with
793
+ respect to :math:`x`:
794
+
795
+ .. math::
796
+
797
+ C = {\\partial{h} \\over \\partial{x}}\\bigg|_{x}
798
+
799
+
800
+ A final step before getting into the filter itself
801
+ is to discretize these terms:
802
+
803
+
804
+ .. math::
805
+
806
+ F = I + A \\cdot dt
807
+
808
+ H = C
809
+
810
+
811
+ Where:
812
+
813
+ - :math:`F` is the discretized process dynamics matrix (actually a first order approximation of the state transition matrix :math:`\\Phi`)
814
+ - :math:`H` is the discrete measurement matrix
815
+ - :math:`I` is the identity matrix
816
+ - :math:`dt` is the sampling time
817
+ - :math:`A, C` are the continuous-time system dynamics and output matrices
818
+
819
+
820
+ Note that :math:`Q` and :math:`R` refer to the covariance matrices
821
+ representing the system noise in its final form, regardless of the time domain.
822
+
823
+
824
+ Now the execution of the `predict` step and the `update` step is possible.
825
+
826
+
827
+ Predict
828
+ =======
829
+
830
+ As mentioned earlier, the advancement of the state vector
831
+ is possible by using the exact equations. The second of
+ the following equations is an Euler integration of the
833
+ nonlinear equations.
834
+
835
+ The progression of the covariance matrix must use
836
+ the linear terms that were derived earlier.
837
+ The first equation in the following
838
+ set is the linearization of the process
839
+ equations for the covariance calculation (third):
840
+
841
+
842
+ .. math::
843
+
844
+ F = I + dt \\cdot {\\partial{f} \\over \\partial{x}}\\bigg|_{x_{k-1}^+, u_{k-1}}
845
+
846
+ x_k^- = x_{k-1}^+ + dt \\cdot f(x_{k-1}^+, u_{k-1})
847
+
848
+ P_k^- = F \\cdot P_{k-1}^+ \\cdot F^T + Q
849
+
850
+ subject to initial conditions:
851
+
852
+ .. math::
853
+
854
+ x_0^+ = x_0
855
+
856
+ P_0^+ = E[x_0 \\cdot x_0^T]
857
+
858
+
859
+ Where:
860
+
861
+ - :math:`F` is the discretized process dynamics matrix
862
+ - :math:`I` is the identity matrix
863
+ - :math:`f(\\cdot)` is a vector-valued function representing the system dynamics
864
+ - :math:`dt` is the sampling time
865
+ - :math:`x_k^-` is the estimate of the system state, :math:`x_k`, before a measurement update.
866
+ - :math:`u_k` is the process input signal
867
+ - :math:`P_k^-` is the estimate of the system covariance matrix, :math:`P_k`, before a measurement update
868
+ - :math:`P_{k-1}^+` is the system covariance matrix estimate, :math:`P_k`, from previous measurement update
869
+ - :math:`Q` is the process covariance matrix
870
+ - superscript T is the transpose operator
871
+ - :math:`x_0` is the initial state estimation
872
+ - :math:`P_0` is the covariance matrix consisting of errors of the initial estimation
873
+
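+ A brief `numpy` sketch of one predict iteration over these equations, with an assumed
+ pendulum-like nonlinear process (the `ekf` class below wraps this logic, so this is for
+ illustration only):
+
+ .. code::
+
+ >>> import numpy as np
+ >>> dt = 0.01
+ >>> def f(x):                          # assumed nonlinear process equations
+ ...   return np.array([x[1], -9.8 * np.sin(x[0])])
+ >>> x = np.array([0.1, 0.0])           # previous (a posteriori) state estimate
+ >>> P = np.eye(2)                      # previous (a posteriori) covariance
+ >>> Q = np.diag([1e-6, 1e-4])          # placeholder process noise covariance
+ >>> A = np.array([[0, 1], [-9.8 * np.cos(x[0]), 0]])   # Jacobian of f at the last estimate
+ >>> F = np.eye(2) + A * dt             # discretized process matrix
+ >>> x_pred = x + dt * f(x)             # Euler integration of the exact equations
+ >>> P_pred = F @ P @ F.T + Q           # a priori covariance
+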
874
+
875
+
876
+
877
+ Update
878
+ ======
879
+
880
+ In a similar manner, the measurement equations :math:`h(x)` are
881
+ linearized (:math:`H`) before the `update` to correct the covariance matrix.
882
+ But the correction of the state vector is possible by using
883
+ the nonlinear equations themselves (third equation):
884
+
885
+
886
+ .. math::
887
+
888
+ H = {\\partial{h} \\over \\partial{x}}\\bigg|_{x_k^-}
889
+
890
+ K = P_k^- \\cdot H^T \\cdot (H \\cdot P_k^- \\cdot H^T + R)^{-1}
891
+
892
+ x_k^+ = x_k^- + K \\cdot (y - h(x_k^-))
893
+
894
+ P_k^+ = (I - K \\cdot H) \\cdot P_k^-
895
+
896
+ Where:
897
+
898
+ - :math:`H` is the discrete measurement matrix
899
+ - :math:`h(\\cdot)` is a vector-valued function representing the measurement equations
900
+ - :math:`x_k^-` is the estimate of the system state, :math:`x_k`, from the previous prediction
901
+ - :math:`K` is the Kalman gain
902
+ - :math:`P_k^-` is the estimate of the system covariance matrix, :math:`P_k`, from the previous prediction
903
+ - :math:`R` is the measurement covariance matrix
904
+ - :math:`x_k^+` is the estimate of the system state, :math:`x_k`, after a measurement update
905
+ - :math:`y` is the measure
906
+ - :math:`I` is the identity matrix
907
+ - :math:`P_k^+` is the estimate of the system covariance matrix, :math:`P_k`, after a measurement update
908
+ - superscript T is the transpose operator
909
+
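+ And a matching update sketch, continuing the predict sketch above, where the measure
+ is assumed to be a direct, noisy observation of the first state variable:
+
+ .. code::
+
+ >>> def h(x):                          # assumed measurement equation
+ ...   return np.array([x[0]])
+ >>> H = np.array([[1, 0]])             # its Jacobian with respect to x
+ >>> R = np.array([[0.25]])             # placeholder measurement noise covariance
+ >>> y = np.array([0.12])               # placeholder measure
+ >>> K = P_pred @ H.T @ np.linalg.inv(H @ P_pred @ H.T + R)   # Kalman gain
+ >>> x_upd = x_pred + K @ (y - h(x_pred))                     # correction with the exact h
+ >>> P_upd = (np.eye(2) - K @ H) @ P_pred
+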
910
+
911
+
912
+ Implementation (C4dynamics)
913
+ ===========================
914
+
915
+ We saw that in both the
+ `predict` and `update` stages,
+ the state doesn't have
+ to rely on the approximated linear equations
+ but can instead
+ use the exact models for the process and the measurement.
+ However, it is sometimes more convenient to use
+ the existing linear approximation for state advancement.
+ C4dynamics provides an interface for each approach:
924
+ the `predict` method
925
+ can either take :math:`f(x)`
926
+ as an input argument or use the necessary matrix :math:`F`
927
+ to project the state in time.
928
+ Similarly, the update method can either
929
+ take :math:`h(x)` as an input argument
930
+ or use the necessary matrix :math:`H`
931
+ to correct :math:`x`.
932
+
933
+ Recall a few additional properties of
934
+ filter implementation in
935
+ c4dynamics, as described in the
936
+ :ref:`linear kalman <kalman_implementation>` section:
937
+
938
+ A. An Extended Kalman filter is a class.
939
+ The object holds the
940
+ attributes required to build the estimates, and
941
+ every method call relies on the results of previous calls.
942
+
943
+ B. The Extended Kalman filter is a
944
+ subclass of the state class.
945
+ The state variables are part of the filter object itself,
946
+ which inherits all the attributes of a state object.
947
+
948
+ C. The filter operations
949
+ are divided into separate `predict` and `update` methods.
950
+ :meth:`ekf.predict <c4dynamics.filters.ekf.ekf.predict>`
951
+ projects the state into
952
+ the next time.
953
+ :meth:`ekf.update <c4dynamics.filters.ekf.ekf.update>`
954
+ calculates the optimized gain and
955
+ corrects the state based on the input measurement.
956
+
957
+
958
+
959
+ Example
960
+ -------
961
+
962
+ The following example appears in several sources.
963
+ [ZP]_ provides a great deal of detail. Additional sources can be found in [SD]_.
964
+ The problem is to estimate the ballistic coefficient of a target
965
+ in a free fall where a noisy radar is tracking it.
966
+
967
+ The process equations are:
968
+
969
+ .. math::
970
+
971
+ \\dot{z} = v_z
972
+
973
+ \\dot{v}_z = {\\rho_0 \\cdot e^{-z / k} \\cdot v_z^2 \\cdot g \\over 2 \\cdot \\beta} - g
974
+
975
+ \\dot{\\beta} = \\omega_{\\beta}
976
+
977
+ y = z + \\nu_k
978
+
979
+
980
+ Where:
981
+
982
+
983
+ - :math:`\\rho_0 = 0.0034`
984
+ - :math:`k = 22,000`
985
+ - :math:`g = 32.2 ft/sec^2`
986
+ - :math:`\\omega_{\\beta} \\sim (0, 300)`
987
+ - :math:`\\nu_k \\sim (0, 500)`
988
+ - :math:`z` is the target altitude (:math:`ft`)
989
+ - :math:`v_z` is the target vertical velocity (:math:`ft/sec`)
990
+ - :math:`\\beta` is the target ballistic coefficient (:math:`lb/ft^2`)
991
+ - :math:`y` is the system measure
992
+
993
+
994
+ Let:
995
+
996
+ .. math::
997
+
998
+ \\rho = \\rho_0 \\cdot e^{-z / k}
999
+
1000
+
1001
+ The linearization of the process equations for the `predict` step yields the matrix:
1002
+
1003
+ .. math::
1004
+
1005
+ M = \\begin{bmatrix}
1006
+ 0 & 1 & 0 \\\\
1007
+ -\\rho \\cdot g \\cdot v_z^2 / (44000 \\cdot \\beta)
1008
+ & \\rho \\cdot g \\cdot v_z / \\beta
1009
+ & -\\rho \\cdot g \\cdot v_z^2 / (2 \\cdot \\beta^2) \\\\
1010
+ 0 & 0 & 0
1011
+ \\end{bmatrix}
1012
+
1013
+
1014
+ The measurement is a direct sample of the altitude of the target
1015
+ so the measurement equation is already a linear function of the state.
1016
+
1017
+ .. math::
1018
+
1019
+ H = \\begin{bmatrix}
1020
+ 1 & 0 & 0
1021
+ \\end{bmatrix}
1022
+
1023
+
1024
+ We now have all we need to run the extended Kalman filter.
1025
+
1026
+
1027
+ Quick setup for an ideal case:
1028
+
1029
+ .. code::
1030
+
1031
+ >>> dt, tf = .01, 30
1032
+ >>> tspan = np.arange(0, tf + dt, dt)
1033
+ >>> dtsensor = 0.05
1034
+ >>> rho0, k = 0.0034, 22000
1035
+ >>> tgt = c4d.state(z = 100000, vz = -6000, beta = 500)
1036
+ >>> altmtr = c4d.sensors.radar(isideal = True, dt = dt)
1037
+
1038
+ Target equations of motion:
1039
+
1040
+ .. code::
1041
+
1042
+ >>> def ballistics(y, t):
1043
+ ... return [y[1], rho0 * np.exp(-y[0] / k) * y[1]**2 * c4d.g_fts2 / 2 / y[2] - c4d.g_fts2, 0]
1044
+
1045
+
1046
+ Main loop:
1047
+
1048
+ .. code::
1049
+
1050
+ >>> for t in tspan:
1051
+ ... tgt.store(t)
1052
+ ... tgt.X = odeint(ballistics, tgt.X, [t, t + dt])[-1]
1053
+ ... _, _, z = altmtr.measure(tgt, t = t, store = True)
1054
+
1055
+ .. figure:: /_examples/filters/bal_ideal.png
1056
+
1057
+
1058
+ These figures show the time histories of the altitude, velocity,
1059
+ and ballistic coefficient, for a target in a free fall with ideal conditions.
1060
+
1061
+ Let's examine the `ekf` capability to estimate :math:`\\beta` in the presence of errors.
+ Errors in the initial conditions are introduced into each of the variables:
1063
+ :math:`z_{0_{err}} = 25, v_{z_{0_{err}}} = -150, \\beta_{0_{err}} = 300`.
1064
+ The uncertainty in the ballistic coefficient is given in terms of
1065
+ the spectral density of a continuous system, such that for flight time :math:`t_f`,
1066
+ the standard deviation of the ballistic coefficient noise accumulated over the flight
+ is :math:`\\beta_{err}`, i.e. a spectral density of :math:`\\beta_{err}^2 / t_f`.
+ The measurement noise is :math:`\\nu = \\sqrt{500}`. These are used
+ for the noise covariance matrices :math:`Q, R` and for
+ the initialization of the state covariance matrix :math:`P`:
1071
+
1072
+
1073
+ .. code::
1074
+
1075
+ >>> zerr, vzerr, betaerr = 25, -150, 300
1076
+ >>> nu = np.sqrt(500)
1077
+ >>> p0 = np.diag([nu**2, vzerr**2, betaerr**2])
1078
+ >>> R = nu**2 / dt
1079
+ >>> Q = np.diag([0, 0, betaerr**2 / tf * dt])
1080
+ >>> H = [1, 0, 0]
1081
+ >>> tgt = c4d.state(z = 100000, vz = -6000, beta = 500)
1082
+ >>> # altmeter and ekf construction:
1083
+ >>> altmtr = c4d.sensors.radar(rng_noise_std = nu, dt = dtsensor)
1084
+ >>> ekf = c4d.filters.ekf(X = {'z': tgt.z + zerr, 'vz': tgt.vz + vzerr
1085
+ ... , 'beta': tgt.beta + betaerr}
1086
+ ... , P0 = p0, H = H, Q = Q, R = R)
1087
+
1088
+
1089
+
1090
+ The main loop includes the simulation of the target motion, the linearization
1091
+ and discretization of the process equations, and calling the `predict` method.
1092
+ Then linearization and discretization of the measurement equations (not relevant
1093
+ here as the measurement is already linear), and calling the `update` method.
1094
+
1095
+ .. code::
1096
+
1097
+ >>> for t in tspan:
1098
+ ... tgt.store(t)
1099
+ ... ekf.store(t)
1100
+ ... # target motion simulation
1101
+ ... tgt.X = odeint(ballistics, tgt.X, [t, t + dt])[-1]
1102
+ ... # process linearization
1103
+ ... rhoexp = rho0 * np.exp(-ekf.z / k) * c4d.g_fts2 * ekf.vz / ekf.beta
1104
+ ... fx = [ekf.vz, rhoexp * ekf.vz / 2 - c4d.g_fts2, 0]
1105
+ ... f2i = rhoexp * np.array([-ekf.vz / 2 / k, 1, -ekf.vz / 2 / ekf.beta])
1106
+ ... # discretization
1107
+ ... F = np.array([[0, 1, 0], f2i, [0, 0, 0]]) * dt + np.eye(3)
1108
+ ... # ekf predict
1109
+ ... ekf.predict(F = F, fx = fx, dt = dt)
1110
+ ... # take a measure
1111
+ ... _, _, Z = altmtr.measure(tgt, t = t, store = True)
1112
+ ... if Z is not None:
1113
+ ... ekf.update(Z) # doctest: +IGNORE_OUTPUT
1114
+
1115
+
1116
+ Though the `update` also requires the linearized
+ process matrix (:math:`F`), the `predict` method
+ stores the introduced `F`, which ensures that
+ the `update` step always comes after calling `predict`.
1120
+
1121
+
1122
+ .. figure:: /_examples/filters/bal_filtered.png
1123
+
1124
+
1125
+
1126
+ A few steps to consider when designing a Kalman filter:
1127
+
1128
+ - Spend some time understanding the dynamics. It's the basis of great filtering.
1129
+ - If the system is nonlinear, identify the nonlinearity; is it in the process? in the measurement? both?
1130
+ - Always prioritize a linear Kalman filter. If possible, find a nominal trajectory to linearize the system about.
1131
+ - The major time-consuming activity is researching the balance between the noise matrices `Q` and `R`; plan your time in advance.
1133
+ - Use a framework that provides you with the most flexibility and control.
1134
+ - Have fun!
1135
+
1136
+
1137
+
1138
+
1139
+
1140
+ ***************
1141
+ Low-pass Filter
1142
+ ***************
1143
+
1144
+ A first-order low-pass filter is a fundamental component in signal processing
1145
+ and control systems, designed to allow low-frequency signals to pass while
1146
+ attenuating higher-frequency noise.
1147
+
1148
+ This type of filter is represented by a simple differential equation
1149
+ and is commonly used for signal smoothing and noise reduction.
1150
+
1151
+
1152
+
1153
+ A low-pass filter (LPF) can be described by the differential equation:
1154
+
1155
+ .. math::
1156
+
1157
+ \\alpha \\cdot \\dot{y} + y = x
1158
+
1159
+ Where:
1160
+
1161
+ - :math:`y` is the output signal
1162
+ - :math:`x` is the input signal
1163
+ - :math:`\\alpha` is a shaping parameter that influences the filter's cutoff frequency
1164
+
1165
+ In signal processing, the LPF smooths signals by reducing high-frequency noise.
1166
+ In control systems, it is often used to model a first-order lag.
1167
+
1168
+
1169
+ Frequency-Domain
1170
+ ================
1171
+
1172
+ In the frequency domain, the transfer function of a first-order low-pass filter is given by:
1173
+
1174
+ .. math::
1175
+
1176
+ H(s) = \\frac{Y(s)}{X(s)} = \\frac{1}{\\alpha \\cdot s + 1}
1177
+
1178
+ Where:
1179
+
1180
+ - :math:`H(s)` is the transfer function
1181
+ - :math:`Y(s)` and :math:`X(s)` are the Laplace transforms of the output and input signals respectively
1182
+ - :math:`s` is the complex frequency variable in the Laplace transform, defined as :math:`s = j \\cdot 2 \\cdot \\pi \\cdot f`
1183
+ - :math:`\\alpha` is a constant related to the cutoff frequency
1184
+
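+ A short numerical check of this response for an assumed cutoff frequency
+ (at :math:`f_c` the magnitude drops to :math:`1/\\sqrt{2} \\approx 0.707`):
+
+ .. code::
+
+ >>> import numpy as np
+ >>> fc = 10                              # assumed cutoff frequency, Hz
+ >>> alpha = 1 / (2 * np.pi * fc)         # time constant matching the cutoff
+ >>> freq = np.array([1, 10, 100])        # test frequencies, Hz
+ >>> jw = 1j * 2 * np.pi * freq
+ >>> Hmag = np.abs(1 / (alpha * jw + 1))  # approximately [0.995, 0.707, 0.0995]
+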
1185
+
1186
+
1187
+ Time-Constant
1188
+ =============
1189
+
1190
+ The cutoff frequency :math:`f_c` is the frequency at which the filter attenuates the signal to approximately 70.7% (-3dB) of its maximum value. It is related to the time constant :math:`\\tau` by:
1191
+
1192
+ .. math::
1193
+
1194
+ f_c = \\frac{1}{2 \\cdot \\pi \\cdot \\tau}
1195
+
1196
+ and equivalently,
1197
+
1198
+ .. math::
1199
+
1200
+ \\tau = \\frac{1}{2 \\cdot \\pi \\cdot f_c}
1201
+
1202
+ In practical applications, the desired cutoff frequency determines :math:`\\tau`, which in turn defines the filter behavior.
1203
+
1204
+
1205
+ Discrete-Time
1206
+ =============
1207
+
1208
+ In the discrete-time domain, a first-order low-pass filter is represented as:
1209
+
1210
+ .. math::
1211
+
1212
+ y_k = \\alpha \\cdot x_k + (1 - \\alpha) \\cdot y_{k-1}
1213
+
1214
+ where :math:`y_k` and :math:`x_k` are the discrete output and input signals at sample index `k`, and :math:`\\alpha` is the filter coefficient derived from the sample rate and cutoff frequency.
1215
+
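+ A minimal sketch of this recursion, with :math:`\\alpha` derived from an assumed sample
+ time and cutoff frequency using the common relation :math:`\\alpha = dt / (\\tau + dt)`:
+
+ .. code::
+
+ >>> import numpy as np
+ >>> dt, fc = 0.001, 10                 # assumed sample time [sec] and cutoff frequency [Hz]
+ >>> tau = 1 / (2 * np.pi * fc)         # time constant
+ >>> alpha = dt / (tau + dt)            # discrete filter coefficient
+ >>> np.random.seed(0)
+ >>> x = 1 + 0.1 * np.random.randn(200) # noisy constant input signal
+ >>> y = np.zeros(200)
+ >>> for i in range(1, 200):
+ ...   y[i] = alpha * x[i] + (1 - alpha) * y[i - 1]   # smoothed output
+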
1216
+
1217
+ Implementation (C4dynamics)
1218
+ ===========================
1219
+
1220
+ This filter class can be initialized with a
1221
+ cutoff frequency and sample rate, allowing users to simulate
1222
+ first-order systems.
1223
+
1224
+
1225
+ References
1226
+ ==========
1227
+
1228
+
1229
+ .. [SD] Simon, Dan,
1230
+ 'Optimal State Estimation: Kalman, H Infinity, and Nonlinear Approaches',
1231
+ Hoboken: Wiley, 2006.
1232
+
1233
+
1234
+ .. [AG] Agranovich, Grigory,
1235
+ Lecture Notes on Modern and Digital Control Systems,
1236
+ University of Ariel, 2012-2013.
1237
+
1238
+
1239
+ .. [ZP] Zarchan, Paul,
1240
+ 'Tactical and Strategic Missile Guidance',
1241
+ American Institute of Aeronautics and Astronautics, 1990.
1242
+
1243
+
1244
+ .. [MZ] Meri, Ziv,
1245
+ `Extended Lyapunov Analysis and Simulative Investigations in Stability of Proportional Navigation Guidance Systems
1246
+ <../_static/PN_Stability.pdf>`_,
1247
+ MSc. Thesis supervised by prof. Grigory Agranovich, University of Ariel, 2020.
1248
+
1249
+ """
1250
+
1251
+
1252
+ # NOTE
1253
+ # the line:
1254
+ # "Note that the divider of R is :math:`dt_{measure}` rather than simply :math:`dt`
1255
+ # because often times the sampling-time of the measures is different than
1256
+ # the sampling-time that uses to integrate the dynamics.
1257
+ # However, when the measure times and the integration times are equal,
1258
+ # then :math:`dt_{measure} = dt`."
1259
+ # required clarification. i think i took it from simon. not sure.
1260
+ # anyway it's weird. as for a continuous system with Q = R
1261
+ # it's going to have balanced weights.
1262
+ # however the translation of it to discrete matrices with Q = Q*dt, R = R/dt
1263
+ # violates the balance.
1264
+ # I think it's really wrong. in pg 232 (247) he says explicitly:
1265
+ # now let us think about measurement noise. suppose we have a discrete-time
1266
+ # measurement of a constant x every T seconds. The measurement times are tk = k*T (k=1,2,..).
1267
+ # ..
1268
+ # the error covariance at time tk is independent of the sample time T if: R = Rc/T.
1269
+ # where Rc is some constant.
1270
+ # this implies that
1271
+ # lim(R, T->0) = Rc * delta(t)
1272
+ # where delta(t) is the continuous time impulse function.
1273
+ # this establishes the equivalence between white measurement noise
1274
+ # in discrete time and continuous time. the effects of white measuremnt noise in discrete time
1275
+ # and continuous time are the same if:
1276
+ # vk ~ (0, R)
1277
+ # v(t) ~ (0, Rc)
1278
+ # i think i should simply say that c4d kalmans are discrete kalmans.
1279
+ # only that the user can also provide cont. matrices.
1280
+ # and now that i think about that its become more clear to me
1281
+ # that what i should do is only suggest discrete filter and also
1282
+ # provide a util for converting a cont to a discr system and then the user
1283
+ # provides the disc systems.
1284
+ # i rather think that best thing is to separate the covariance matrices from
1285
+ # discretization and just present it as given for the final system.
1286
+ # or to add a remark and say that if also the covariance matrices are given for
1287
+ # cont system then this is the way to discretize it.
1288
+ # or add a note that in practice the noise of the discrete system should be known empirically or from a data sheet.
1289
+ # or to add that in practice the sensors are taking measurements in discrete times.
1290
+ #
1291
+ # another problem arises:
1292
+ # the kalman is implemented as discrete-time system.
1293
+ # if the user provides system matrices A,B,C in the continuous-time domain,
1294
+ # then i ask also the time-step parameter and convert them to the discrete-time
1295
+ # form and then calculate the filter equations.
1296
+ # the problem is that if the user provides continuous-time matrices,
1297
+ # probably he also provides his noise covariances Q and R in the continuous-time domain,
1298
+ # which means that the noise descriptions do not match the discrete form of my filter.
1299
+ # what do u suggest to do?
1300
+ # gpt:
1301
+ # convert q by yourself according to:
1302
+ # Qd = A^-1 * (e^AT - I) * Qc
1303
+ # R is in anyway sampled in disc times.
1304
+ # alternatively change the model to discrete inputs only.
1305
+ #
1306
+ #
1307
+ # FIXME
1308
+ # the example of the kalman filter must be fixed as there's no reason to
1309
+ # divide R here by dt because it's the variance of the radar in the given time step!!
1310
+ # see figures in w.doc.
1311
+
1312
+ '''
1313
+ Franklin, G.F., Powell,D.J., and Workman, M.L., Digital Control of Dynamic Systems
1314
+ ch 9
1315
+ 9.4.2 the discrete kf:
1316
+ w(t) and v(t) have no time correlation.
1317
+ E(w*w^T)=Rw=Q
1318
+ E(v*v^T)=Rv=R
1319
+
1320
+ 9.4.4. noise matrices and discerete equivalents.
1321
+ the process uncertainty acts on the continuous portion of the system.
1322
+
1323
+
1324
+
1325
+
1326
+ i have a cont system sampled with a
1327
+ discrete samples camera. let's say the sensor errors with its algo are
1328
+ sig_camera in both position and bounding box.
1329
+ i want to show an example where i give the camera and the process the
1330
+ same weight and i run them in a steady state mode.
1331
+ the model is a const velocity model.
1332
+ then i say i want to overcome an error in the linearity and extend the
1333
+ uncertainty of the process with still continuous modeling of the process.
1334
+ ** remark: how at all can kalman designers
1335
+ introduce the uncertainty in the noise? after all kalman
1336
+ restrains that factor to be a white noise with mean 0 and
1337
+ im not sure the model uncertainty behaves in that way.
1338
+ ** any way in the next example i want to show
1339
+ that same results could be achieved by using discrete matrices.
1340
+ '''
1341
+
1342
+ import sys, os
1343
+ sys.path.append('.')
1344
+
1345
+ from c4dynamics.filters.kalman import kalman
1346
+ from c4dynamics.filters.ekf import ekf
1347
+ from c4dynamics.filters.lowpass import lowpass
1348
+
1349
+
1350
+ if __name__ == "__main__":
1351
+
1352
+ import doctest, contextlib
1353
+ from c4dynamics import IgnoreOutputChecker, cprint
1354
+
1355
+ # Register the custom OutputChecker
1356
+ doctest.OutputChecker = IgnoreOutputChecker
1357
+
1358
+ tofile = False
1359
+ optionflags = doctest.FAIL_FAST
1360
+
1361
+ if tofile:
1362
+ with open(os.path.join('tests', '_out', 'output.txt'), 'w') as f:
1363
+ with contextlib.redirect_stdout(f), contextlib.redirect_stderr(f):
1364
+ result = doctest.testmod(optionflags = optionflags)
1365
+ else:
1366
+ result = doctest.testmod(optionflags = optionflags)
1367
+
1368
+ if result.failed == 0:
1369
+ cprint(os.path.basename(__file__) + ": all tests passed!", 'g')
1370
+ else:
1371
+ print(f"{result.failed}")
1372
+
1373
+