pygnss 1.0.0__cp314-cp314t-musllinux_1_2_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pygnss might be problematic.
- pygnss/__init__.py +1 -0
- pygnss/_c_ext/src/hatanaka.c +95 -0
- pygnss/_c_ext/src/helpers.c +17 -0
- pygnss/_c_ext/src/mtable_init.c +44 -0
- pygnss/_c_ext.cpython-314t-x86_64-linux-musl.so +0 -0
- pygnss/cl.py +148 -0
- pygnss/constants.py +4 -0
- pygnss/decorator.py +47 -0
- pygnss/file.py +36 -0
- pygnss/filter/__init__.py +62 -0
- pygnss/filter/ekf.py +80 -0
- pygnss/filter/models.py +73 -0
- pygnss/filter/ukf.py +322 -0
- pygnss/geodetic.py +1169 -0
- pygnss/gnss/__init__.py +0 -0
- pygnss/gnss/edit.py +66 -0
- pygnss/gnss/observables.py +43 -0
- pygnss/gnss/residuals.py +43 -0
- pygnss/gnss/types.py +359 -0
- pygnss/hatanaka.py +51 -0
- pygnss/ionex.py +410 -0
- pygnss/iono/__init__.py +0 -0
- pygnss/iono/chapman.py +35 -0
- pygnss/iono/gim.py +131 -0
- pygnss/logger.py +70 -0
- pygnss/nequick.py +57 -0
- pygnss/orbit/__init__.py +0 -0
- pygnss/orbit/kepler.py +63 -0
- pygnss/orbit/tle.py +186 -0
- pygnss/parsers/rtklib/stats.py +166 -0
- pygnss/rinex.py +2161 -0
- pygnss/sinex.py +121 -0
- pygnss/stats.py +75 -0
- pygnss/tensorial.py +50 -0
- pygnss/time.py +350 -0
- pygnss-1.0.0.dist-info/METADATA +56 -0
- pygnss-1.0.0.dist-info/RECORD +41 -0
- pygnss-1.0.0.dist-info/WHEEL +5 -0
- pygnss-1.0.0.dist-info/entry_points.txt +8 -0
- pygnss-1.0.0.dist-info/licenses/LICENSE +21 -0
- pygnss-1.0.0.dist-info/top_level.txt +1 -0
pygnss/__init__.py
ADDED
@@ -0,0 +1 @@
__version__ = "1.0.0"
pygnss/_c_ext/src/hatanaka.c
ADDED
@@ -0,0 +1,95 @@
#include <Python.h>
#include <datetime.h>

#include "hatanaka/include/crx2rnx.h"


static char* get_crx_line(void* _args, size_t n_max, char* dst) {

    FILE* input_fh = (FILE*)_args;
    return fgets(dst, n_max, input_fh);

}

static bool is_eof(void* _args) {

    FILE* input_fh = (FILE*)_args;
    return (fgetc(input_fh) == EOF);

}

static int on_measurement(const struct gnss_meas* gnss_meas, void* _args) {

    static const int N_FIELDS = 5; // Number of fields for struct gnss_meas

    int ret = -1;
    PyObject* list = (PyObject*)_args;

    if (gnss_meas == NULL) {
        goto exit;
    }

    PyDateTime_IMPORT;

    // Create a Python list for the row
    PyObject* row = PyList_New(N_FIELDS);

    double timestamp = (double)gnss_meas->gps_time.tv_sec + (double)gnss_meas->gps_time.tv_nsec / 1e9;
    PyObject* time_tuple = Py_BuildValue("(d)", timestamp);
    PyObject* date_time = PyDateTime_FromTimestamp(time_tuple);

    PyList_SetItem(row, 0, date_time);
    PyList_SetItem(row, 1, PyUnicode_FromStringAndSize(gnss_meas->satid, 3));
    PyList_SetItem(row, 2, PyUnicode_FromStringAndSize(gnss_meas->rinex3_code, 3));
    PyList_SetItem(row, 3, PyFloat_FromDouble(gnss_meas->value));
    PyList_SetItem(row, 4, PyLong_FromUnsignedLong(gnss_meas->lli));

    // Add the inner list to the outer list
    PyList_Append(list, row);
    Py_DECREF(row); // Decrement the reference count of 'row'

    ret = 0;
exit:
    return ret;
}

PyObject *_read_crx(PyObject* self, PyObject* args, PyObject* kwargs) {

    char *filename = NULL;
    struct crx2rnx* crx2rnx = NULL;
    int ret = -1;
    PyObject* list = PyList_New(0);

    struct crx2rnx_callbacks callbacks = {
        .on_measurement = on_measurement,
        .on_measurement_args = list
    };

    // Parse the filename argument
    if (!PyArg_ParseTuple(args, "s", &filename)) {
        PyErr_SetString(PyExc_TypeError, "Expected a string filename");
        goto end;
    }

    // Open the file
    FILE* fp = fopen(filename, "r");
    if (fp == NULL) {
        PyErr_SetString(PyExc_IOError, "Could not open file");
        goto end;
    }

    crx2rnx = crx2rnx__init(false, false, NULL, get_crx_line, (void*)fp, is_eof, (void*)fp, &callbacks);

    ret = crx2rnx__run(crx2rnx);

    if (ret < 0) {
        PyErr_SetString(PyExc_IOError, "There was an issue processing the Hatanaka file");
        PyList_SetSlice(list, 0, PY_SSIZE_T_MAX, NULL); // clear the list
    }

    // Clean-up
    fclose(fp);
end:
    return list;

}
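Judging from the callback above, `_read_crx` builds a list of rows of the form [datetime, satellite id, RINEX 3 code, value, LLI]. A minimal usage sketch, assuming the compiled extension is importable as `pygnss._c_ext` and that "obs.crx" is a hypothetical, already uncompressed CRINEX file:

# Minimal sketch, not part of the package: assumes the compiled extension is
# importable as pygnss._c_ext and that "obs.crx" is an uncompressed CRINEX file.
from pygnss import _c_ext

rows = _c_ext._read_crx("obs.crx")

# Each row is [datetime, satellite id, RINEX 3 code, value, LLI]
for epoch, satid, code, value, lli in rows[:5]:
    print(epoch.isoformat(), satid, code, value, lli)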
pygnss/_c_ext/src/helpers.c
ADDED
@@ -0,0 +1,17 @@
#include <stdlib.h>
#include <Python.h>

#include "../include/helpers.h"

PyObject *convert_to_pylist(const double* array, size_t n) {

    Py_ssize_t len = n;
    PyObject* list = PyList_New(len);

    for (Py_ssize_t i = 0; i < len; i++) {
        PyObject* value = PyFloat_FromDouble(array[i]);
        PyList_SetItem(list, i, value);
    }

    return list;
}
pygnss/_c_ext/src/mtable_init.c
ADDED
@@ -0,0 +1,44 @@
#include <Python.h>

#include "hatanaka.h"

static PyMethodDef module_methods[] = {
    { "_read_crx", (PyCFunction)_read_crx, METH_VARARGS | METH_KEYWORDS,
      "Read a Hatanaka (gzip uncompressed) file and generate a numpy array\n\n"
      ":param filename: Name of the Hatanaka file to process\n"
      ":return: Numpy array\n\n"},
    {NULL, NULL, 0, NULL}, /* Sentinel */
};

/*----------------------------------------------------------------------------*/

static struct PyModuleDef module = {
    PyModuleDef_HEAD_INIT,
    "_c_ext",       /* name of the module */
    "C extension methods",
    -1,             // size of per-interpreter state of the module,
                    // or -1 if the module keeps state in global variables.
    module_methods
};


PyMODINIT_FUNC PyInit__c_ext(void) {

    PyObject* m = NULL;

    // // Classes
    // if (PyType_Ready(HatanakaReaderType) < 0) {
    //     goto end;
    // }

    m = PyModule_Create(&module);
    if (m == NULL) {
        goto end;
    }

    // Py_INCREF(HatanakaReaderType);
    // PyModule_AddObject(m, "HatanakaReader", (PyObject*)HatanakaReaderType);

end:
    return m;
}
pygnss/_c_ext.cpython-314t-x86_64-linux-musl.so
ADDED
Binary file
pygnss/cl.py
ADDED
@@ -0,0 +1,148 @@
"""
Program to perform various columnar operations on inputs

All indicators have this format

    'x0'

where 'x' can be one of the following

- 'c' - Select column
- 'd' - diff column relative to the previous value
- 'f' - diff column relative to the first value of the column
- 'm' - Compute the minutes elapsed since the first value (divides the column
        by 60, as it assumes that the values are in seconds)
- 'h' - Compute the hours elapsed since the first value (divides the column
        by 3600, as it assumes that the values are in seconds)

and '0' is the column number (1-based)

Examples:

(a) Select columns in the indicated order (first output the 5th column and
    then the 1st column)

    cat file.txt | cl c5 c1

(b) Select the 6th column and output the 1st column relative to its first value

    cat file.txt | cl c6 f1

(c) Diff the third column relative to its first value

    cat file.txt | cl f3
"""
import argparse
import sys


class ColumnProcess:
    """
    Class that manages the processing of a set of fields based on some criteria
    """

    def __init__(self, colprocstr):
        """
        Class initialization. This method receives a string defining the type
        of operation to be performed
        """

        if len(colprocstr) < 2:
            raise ValueError(f"Do not know how to interpret [ {colprocstr} ], "
                             "column selector should be of the form 'n0', with "
                             "'n' being a character and '0' a column number")

        self.process_type = colprocstr[0]

        # Obtain the column number, taking into account that the indices must
        # be translated from 1-based to 0-based
        self.process_column = int(colprocstr[1:]) - 1

        self.previous_value = None
        self.first_value = None

    def process(self, fields):
        """
        Process a set of fields. Raise an exception if the requested column is
        not present in the line.
        """

        if self.process_column >= len(fields):
            raise IndexError(f"Unable to fetch column [ {self.process_column + 1} ] (1-based) "
                             f"in line with [ {len(fields)} ] fields. "
                             f"Offending line [ {' '.join(fields)} ]\n")

        column_value = fields[self.process_column]

        if self.process_type == 'c':

            return column_value

        elif self.process_type in ('f', 'm', 'h'):

            incoming_value = float(column_value)

            if self.first_value is None:
                self.first_value = incoming_value

            value = incoming_value - self.first_value

            if self.process_type == 'm':
                value = value / 60.0
            elif self.process_type == 'h':
                value = value / 3600.0

            return str(value)

        elif self.process_type == 'd':

            incoming_value = float(column_value)

            if self.previous_value is None:
                self.previous_value = incoming_value

            value = incoming_value - self.previous_value

            # Update internal value only if the process method is the difference
            # relative to the previous value
            if self.process_type == 'd':
                self.previous_value = incoming_value

            return str(value)

        else:
            raise ValueError("Do not know what process type is '%c'" % self.process_type)

    def __str__(self):

        return self.__repr__()

    def __repr__(self):

        return "Process type [ %s ], process column [ %d ]\n" % (self.process_type, self.process_column)


def entry_point():

    # Process the options of the executable

    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('columns', metavar='<selector>', type=str, nargs='+',
                        help="Set of column selectors and operators")

    args = parser.parse_args()

    # Make an array of objects that will take care of processing the fields
    colprocs = [ColumnProcess(colproc) for colproc in args.columns]

    for line in sys.stdin:

        # If line is empty, print an empty line
        if len(line.strip()) == 0:
            sys.stdout.write("\n")
            continue

        fields = line.strip().split()

        # Process each column
        newfields = [cp.process(fields) for cp in colprocs]

        sys.stdout.write(" ".join(newfields) + "\n")
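The selectors can also be exercised without the shell pipeline by instantiating `ColumnProcess` directly; a small sketch with made-up input lines:

# Minimal sketch, not part of the package: feed whitespace-separated lines
# through the same selectors the `cl` entry point uses.
from pygnss.cl import ColumnProcess

lines = [
    "10 100.0",   # hypothetical input: seconds and a measurement
    "70 101.5",
    "130 103.0",
]

# m1: minutes elapsed since the first value of column 1, d2: delta of column 2
selectors = [ColumnProcess("m1"), ColumnProcess("d2")]

for line in lines:
    fields = line.split()
    print(" ".join(cp.process(fields) for cp in selectors))
# 0.0 0.0
# 1.0 1.5
# 2.0 1.5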
pygnss/constants.py
ADDED
pygnss/decorator.py
ADDED
@@ -0,0 +1,47 @@
import gzip
from functools import wraps
import subprocess
import warnings


def deprecated(alternative):
    def decorator(func):
        def new_func(*args, **kwargs):
            # Raise a DeprecationWarning with the specified message.
            message = f"Call to deprecated function {func.__name__}."
            if alternative:
                message += f" Use {alternative} instead."
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return new_func
    return decorator


def read_contents(func):
    """
    Decorator to handle gzip compression based on filename and pass its contents
    to the function
    """

    @wraps(func)
    def wrapper(filename, *args, **kwargs):

        doc = None

        if filename.endswith('.gz'):
            with gzip.open(filename, 'rt', encoding='utf-8') as fh:
                doc = fh.read()
        elif filename.endswith('.Z'):
            result = subprocess.run(['uncompress', '-c', filename],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    check=True,
                                    text=True)
            doc = result.stdout
        else:
            with open(filename, 'rt', encoding='utf-8') as fh:
                doc = fh.read()

        return func(doc, *args, **kwargs)

    return wrapper
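A sketch of how a parser might use these decorators; `count_lines` below is hypothetical and not part of the package:

# Minimal sketch, not part of the package: `count_lines` is a hypothetical
# parser that receives the (possibly decompressed) file contents as a string.
import gzip
import tempfile

from pygnss.decorator import deprecated, read_contents


@read_contents
def count_lines(doc: str) -> int:
    # `doc` already holds the full text, gunzipped/uncompressed if needed
    return len(doc.splitlines())


@deprecated(alternative="count_lines")
def old_count_lines(filename: str) -> int:
    return count_lines(filename)


# Write a small gzipped sample so the decorator's '.gz' branch is exercised
with tempfile.NamedTemporaryFile(suffix=".gz", delete=False) as tmp:
    with gzip.open(tmp.name, "wt", encoding="utf-8") as fh:
        fh.write("line 1\nline 2\n")

print(count_lines(tmp.name))       # -> 2
print(old_count_lines(tmp.name))   # -> 2, and a DeprecationWarning is issued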
pygnss/file.py
ADDED
@@ -0,0 +1,36 @@
from functools import wraps
from typing import IO


def process_filename_or_file_handler(mode):
    def decorator(func):
        @wraps(func)
        def wrapper(input, *args, **kwargs):
            if isinstance(input, str):
                with open(input, mode) as fh:
                    return func(fh, *args, **kwargs)
            else:
                return func(input, *args, **kwargs)
        return wrapper
    return decorator


def grep_lines(filename: str, pattern_string: str):
    """
    Generator function used to grep lines from a file. Can be used in methods
    such as numpy.genfromtxt, ...

    >>> generator = grep_lines(filename, "pattern")
    >>> data = numpy.loadtxt(generator)
    """

    with open(filename, 'r') as fh:
        for line in fh:
            if pattern_string in line:
                yield line


def skip_lines(fh: IO, n_lines: int):

    for _ in range(n_lines):
        fh.readline()
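`process_filename_or_file_handler` lets the same reader accept either a path or an already open handle. A minimal sketch with a hypothetical `read_header` function:

# Minimal sketch, not part of the package: `read_header` is a hypothetical
# reader that works both with a filename and with an open file handle.
import io

from pygnss.file import process_filename_or_file_handler, skip_lines


@process_filename_or_file_handler('r')
def read_header(fh, n_skip: int = 0) -> str:
    skip_lines(fh, n_skip)
    return fh.readline().strip()


buffer = io.StringIO("# comment\nHEADER LINE\ndata ...\n")
print(read_header(buffer, n_skip=1))   # -> "HEADER LINE"
# read_header("some_file.txt") would open the (hypothetical) path itself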
pygnss/filter/__init__.py
ADDED
@@ -0,0 +1,62 @@
"""
Module for the filter classes

Some notation conventions:

- $x_m$ is the state predicted from the previous (k-1) state
- $y_m$ indicates the observations resulting from the predicted
  state ($x_m$)
- $H$ is the design (Jacobian) matrix, that translates from state to observation
  (i.e. $y = H \\cdot x$)
- $\\Phi$ is the state transition matrix, that translates from the
  state at k-1 to the predicted state ($x_m$)
"""
from abc import ABC, abstractmethod
from collections import namedtuple
from typing import Tuple

import numpy as np

ModelObs = namedtuple('ModelObs', ('y_m', 'H'))  # y_m must be an array of arrays (2D shaped)


class Model(ABC):
    """
    Abstract class that declares the interface for entities that model
    a system to be used by an estimation filter
    """

    @abstractmethod
    def propagate_state(self, state: np.array) -> np.array:
        """
        Propagate a state from time k-1 to k
        """

    @abstractmethod
    def to_observations(self, state: np.array, compute_jacobian: bool = False, **kwargs) -> ModelObs:
        """
        Propagate a state to its corresponding modelled observations (i.e.
        compute the expected observations/measurements for the input state)

        :return: a tuple where the first element holds the observations and the
        second the Jacobian matrix (if compute_jacobian is True, otherwise the
        second element will be None)
        """

    def Phi(self):
        """
        Provide the state transition matrix (also noted F in certain
        Kalman notation)
        """


class StateHandler(ABC):
    """
    Abstract class that handles the state generated by the filter
    """

    @abstractmethod
    def process_state(self, state: np.array, covariance_matrix: np.array, **kwargs):
        """
        Process the state and associated covariance_matrix
        """
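None of the files shown here ships a concrete `StateHandler`; a minimal sketch (not part of the package) of one that simply collects what the filter hands over:

# Minimal sketch, not part of the package: a StateHandler that stores every
# state/covariance pair produced by the filter.
import numpy as np

from pygnss.filter import StateHandler


class CollectingStateHandler(StateHandler):

    def __init__(self):
        self.states = []
        self.covariances = []

    def process_state(self, state: np.array, covariance_matrix: np.array, **kwargs):
        # Keep copies so later filter updates do not mutate the stored results
        self.states.append(np.array(state, copy=True))
        self.covariances.append(np.array(covariance_matrix, copy=True))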
pygnss/filter/ekf.py
ADDED
@@ -0,0 +1,80 @@
"""
Module for the EKF
"""
import logging
from typing import Tuple

import numpy as np

from . import StateHandler, Model


class Ekf(object):
    """
    Extended Kalman Filter (EKF)
    """

    def __init__(self,
                 x0: np.array,
                 P0: np.array,
                 Q: np.array,
                 model: Model,
                 state_handler: StateHandler,
                 logger: logging.Logger = logging):
        """
        Initialize the EKF filter object
        """

        self.x = x0
        self.P = P0
        self.Q = Q

        self.model = model
        self.state_handler = state_handler

        self.logger = logger

        self.L = len(self.x)

    def process(self, y_k: np.array, R: np.array, **kwargs):
        """
        Process an observation batch
        """

        # Time update ----------------------------------------------------------
        x_m, P_m = self._time_update()

        # Measurement update ---------------------------------------------------
        y_m, H = self.model.to_observations(x_m, compute_jacobian=True, **kwargs)

        P_yy = H @ P_m @ H.T + R
        P_xy = P_m @ H.T

        self.x = x_m
        self.P = P_m

        try:
            K = P_xy @ np.linalg.inv(P_yy)     # Calculate Kalman gain

            self.x = self.x + K @ (y_k - y_m)  # Update state estimate
            self.P = self.P - K @ H @ P_m      # Update covariance estimate

        except np.linalg.LinAlgError as e:
            self.logger.warning(f'Unable to compute state, keeping previous one. Error: {e}')

        # Compute postfit residuals
        r = y_k - self.model.to_observations(self.x, **kwargs).y_m

        self.state_handler.process_state(self.x, self.P, postfits=r, **kwargs)

    def _time_update(self) -> Tuple[np.array, np.array]:
        """
        Perform a time update step
        """

        Phi = self.model.Phi

        x_m = self.model.propagate_state(self.x)
        P_m = Phi @ self.P @ Phi.T + self.Q

        return x_m, P_m
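Note that `Ekf` uses `model.Phi` directly as a matrix (without calling it) and reads `.y_m` off the value returned by `to_observations`, so it expects a `ModelObs`. A minimal, hypothetical 1-D random-walk model wired into the filter (all numbers are made up for illustration):

# Minimal sketch, not part of the package: a tiny linear model (a random walk
# observed directly) driven through the Ekf; names and values are illustrative.
import numpy as np

from pygnss.filter import Model, ModelObs, StateHandler
from pygnss.filter.ekf import Ekf


class RandomWalk1D(Model):

    Phi = np.eye(1)   # exposed as a matrix, since Ekf uses model.Phi directly

    def propagate_state(self, state):
        return self.Phi @ state

    def to_observations(self, state, compute_jacobian=False, **kwargs):
        H = np.eye(1)  # the state is observed directly
        return ModelObs(y_m=H @ state, H=H if compute_jacobian else None)


class PrintHandler(StateHandler):
    def process_state(self, state, covariance_matrix, **kwargs):
        print("estimate:", state, "variance:", covariance_matrix.ravel())


ekf = Ekf(x0=np.zeros(1), P0=np.eye(1) * 100.0, Q=np.eye(1) * 1e-3,
          model=RandomWalk1D(), state_handler=PrintHandler())

R = np.eye(1) * 0.25
for measurement in (1.02, 0.97, 1.05, 0.99):   # made-up noisy observations
    ekf.process(np.array([measurement]), R)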
pygnss/filter/models.py
ADDED
@@ -0,0 +1,73 @@
import numpy as np

from . import Model, ModelObs


class RangePositioning2D(Model):
    """
    Basic 2D range-based positioning model
    """

    def __init__(self, Phi: np.array, nodes: np.array):
        """
        Instantiate a RangePositioning2D

        :param Phi: a 2 x 2 matrix that propagates the state from k-1 to k
        :param nodes: list of nodes of the positioning system, from which the
            range will be computed
        """
        self._Phi = Phi
        self.nodes = nodes

    def propagate_state(self, state: np.array):
        """
        Propagate the state from k-1 to k

        >>> Phi = np.eye(2)
        >>> nodes = np.array([[0, 0], [0, 10], [10, 0]])
        >>> model = RangePositioning2D(Phi, nodes)
        >>> state_m = np.array([1, 2])
        >>> model.propagate_state(state_m)
        array([1., 2.])
        """

        return np.dot(self._Phi, state)

    def to_observations(self, state: np.array, compute_jacobian: bool = False) -> ModelObs:
        """
        Convert the state into observations using a range based 2D positioning model

        >>> Phi = np.eye(2)
        >>> nodes = np.array([[0, 0], [0, 10], [10, 0]])
        >>> model = RangePositioning2D(Phi, nodes)
        >>> state_m = np.array([1, 2])
        >>> model.to_observations(state_m)
        (array([2.23606798, 8.06225775, 9.21954446]), None)

        >>> model.to_observations(state_m, compute_jacobian=True)
        (array([2.23606798, 8.06225775, 9.21954446]), array([[ 0.4472136 ,  0.89442719],
               [ 0.12403473, -0.99227788],
               [-0.97618706,  0.21693046]]))
        """
        rho = state - self.nodes
        ranges = np.sqrt(np.sum(np.power(rho, 2), axis=1))

        H = None

        if compute_jacobian is True:
            H = rho / ranges[:, np.newaxis]

        return ranges, H

    def Phi(self):
        """
        Get the state transition matrix

        >>> Phi = np.eye(2)
        >>> nodes = np.array([[0, 0], [0, 10], [10, 0]])
        >>> model = RangePositioning2D(Phi, nodes)
        >>> model.Phi()
        array([[1., 0.],
               [0., 1.]])
        """
        return self._Phi
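As a quick sanity check of the model on its own, a sketch (not part of the package) that runs a plain Gauss-Newton position fix on simulated ranges; the node layout, true position and initial guess are made up:

# Minimal sketch, not part of the package: use the model's ranges and Jacobian
# for a plain Gauss-Newton position fix from three simulated range measurements.
import numpy as np

from pygnss.filter.models import RangePositioning2D

nodes = np.array([[0.0, 0.0], [0.0, 10.0], [10.0, 0.0]])
model = RangePositioning2D(np.eye(2), nodes)

truth = np.array([3.0, 4.0])                    # hypothetical true position
measured = np.sqrt(np.sum((truth - nodes) ** 2, axis=1))

x = np.array([1.0, 1.0])                        # rough initial guess
for _ in range(5):
    predicted, H = model.to_observations(x, compute_jacobian=True)
    dx, *_ = np.linalg.lstsq(H, measured - predicted, rcond=None)
    x = x + dx

print(x)   # converges towards [3., 4.]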