enode-host 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
enode_host/protocol.py ADDED
@@ -0,0 +1,311 @@
+ import enum
+ import struct
+ from dataclasses import dataclass
+ from typing import Optional
+
+
+ class MsgType(enum.IntEnum):
+     COMMAND = 0x10
+     STATUS = 0x20
+     ACK = 0x30
+     DATA = 0x40
+     PPS = 0x41
+     SD_STREAM = 0x50
+     SD_DONE = 0x51
+     GNSS = 0x60
+
+
+ class CommandId(enum.IntEnum):
+     START_DAQ = 0x01
+     STOP_DAQ = 0x02
+     SET_MODE = 0x03
+     START_REALTIME_STREAM = 0x04
+     STOP_REALTIME_STREAM = 0x05
+     START_PAST_STREAM = 0x06
+     STOP_PAST_STREAM = 0x07
+     START_SD_STREAM = 0x08
+     STOP_SD_STREAM = 0x09
+     SD_CLEAR = 0x0A
+     SET_DAQ_MODE = 0x0B
+
+
+ class Mode(enum.IntEnum):
+     REALTIME = 0
+     PAST = 1
+
+
+ class Toggle(enum.IntEnum):
+     STOP = 0
+     START = 1
+
+
+ @dataclass
+ class StatusReport:
+     node_type: int
+     node_number: int
+     level: int
+     parent_mac: bytes
+     self_mac: bytes
+     rssi: int
+     acc_model: int = 0
+     daq_mode: int = 0
+     daq_on: int = 0
+     stream_status: int = 0
+
+
+ @dataclass
+ class AccSample:
+     cc: int
+     ax: float
+     ay: float
+     az: float
+
+
+ @dataclass
+ class AccBatch:
+     node_type: int
+     node_number: int
+     samples: list[AccSample]
+
+
+ @dataclass
+ class PpsReport:
+     node_type: int
+     node_number: int
+     cc: int
+     epoch: int
+
+
+ @dataclass
+ class SdStreamChunk:
+     node_type: int
+     node_number: int
+     file_time: int
+     offset: int
+     data: bytes
+
+
+ @dataclass
+ class SdStreamDone:
+     node_type: int
+     node_number: int
+     file_time: int
+     status: int
+
+
+ @dataclass
+ class GnssReport:
+     node_type: int
+     node_number: int
+     latitude: float
+     longitude: float
+     fix_mode: int
+     valid: int
+
+
+ @dataclass
+ class Ack:
+     original_type: int
+     status: int
+     message: str = ""
+
+
+ def pack_frame(msg_type: int, payload: bytes) -> bytes:
+     if len(payload) > 0xFFFF:
+         raise ValueError("payload too large")
+     header = struct.pack(">BH", int(msg_type), len(payload))
+     return header + payload
+
+
+ def build_command(command_id: CommandId, payload: bytes = b"") -> bytes:
+     return pack_frame(MsgType.COMMAND, bytes([int(command_id)]) + payload)
+
+
+ def build_ack(original_type: int, status: int, message: str = "") -> bytes:
+     msg = message.encode("utf-8")
+     payload = struct.pack(">BBH", int(original_type), int(status), len(msg)) + msg
+     return pack_frame(MsgType.ACK, payload)
+
+
+ def parse_ack(payload: bytes) -> Ack:
+     if len(payload) < 4:
+         raise ValueError(f"invalid ack payload length {len(payload)}")
+     original_type, status, msg_len = struct.unpack(">BBH", payload[:4])
+     message = payload[4:4 + msg_len].decode("utf-8", errors="replace")
+     return Ack(original_type=original_type, status=status, message=message)
+
+
+ def build_sd_stream_start(hours: int) -> bytes:
+     payload = struct.pack(">H", int(hours))
+     return build_command(CommandId.START_SD_STREAM, payload)
+
+
+ def build_sd_stream_stop() -> bytes:
+     return build_command(CommandId.STOP_SD_STREAM, b"")
+
+
+ def build_sd_clear() -> bytes:
+     return build_command(CommandId.SD_CLEAR, b"")
+
+
+ def build_set_daq_mode(mode: int) -> bytes:
+     return build_command(CommandId.SET_DAQ_MODE, bytes([int(mode) & 0xFF]))
+
+
+ def parse_status(payload: bytes) -> StatusReport:
+     if len(payload) not in (16, 18, 19, 20):
+         raise ValueError(f"invalid status payload length {len(payload)}")
+     node_type = payload[0]
+     node_number = payload[1]
+     level = payload[2]
+     parent_mac = payload[3:9]
+     self_mac = payload[9:15]
+     rssi = struct.unpack(">b", payload[15:16])[0]
+     acc_model = payload[16] if len(payload) >= 18 else 0
+     daq_mode = payload[17] if len(payload) >= 18 else 0
+     daq_on = payload[18] if len(payload) >= 19 else 0
+     stream_status = payload[19] if len(payload) >= 20 else 0
+     return StatusReport(
+         node_type=node_type,
+         node_number=node_number,
+         level=level,
+         parent_mac=parent_mac,
+         self_mac=self_mac,
+         rssi=rssi,
+         acc_model=acc_model,
+         daq_mode=daq_mode,
+         daq_on=daq_on,
+         stream_status=stream_status,
+     )
+
+
+ def parse_sd_chunk(payload: bytes) -> SdStreamChunk:
+     if len(payload) < 12:
+         raise ValueError(f"invalid sd chunk payload length {len(payload)}")
+     node_type = payload[0]
+     node_number = payload[1]
+     file_time = struct.unpack(">I", payload[2:6])[0]
+     offset = struct.unpack(">I", payload[6:10])[0]
+     size = struct.unpack(">H", payload[10:12])[0]
+     if len(payload) < 12 + size:
+         raise ValueError("sd chunk length mismatch")
+     data = payload[12:12 + size]
+     return SdStreamChunk(
+         node_type=node_type,
+         node_number=node_number,
+         file_time=file_time,
+         offset=offset,
+         data=data,
+     )
+
+
+ def parse_sd_done(payload: bytes) -> SdStreamDone:
+     if len(payload) != 7:
+         raise ValueError(f"invalid sd done payload length {len(payload)}")
+     node_type = payload[0]
+     node_number = payload[1]
+     file_time = struct.unpack(">I", payload[2:6])[0]
+     status = payload[6]
+     return SdStreamDone(
+         node_type=node_type,
+         node_number=node_number,
+         file_time=file_time,
+         status=status,
+     )
+
+
+ def parse_gnss(payload: bytes) -> GnssReport:
+     if len(payload) != 12:
+         raise ValueError(f"invalid gnss payload length {len(payload)}")
+     node_type = payload[0]
+     node_number = payload[1]
+     latitude, longitude = struct.unpack(">ff", payload[2:10])
+     fix_mode = payload[10]
+     valid = payload[11]
+     return GnssReport(
+         node_type=node_type,
+         node_number=node_number,
+         latitude=latitude,
+         longitude=longitude,
+         fix_mode=fix_mode,
+         valid=valid,
+     )
+
+
+ _ACC_SAMPLE_STRUCT = struct.Struct(">Qfff")
+ _PPS_STRUCT = struct.Struct(">Qq")
+
+
+ def build_acc_batch(node_type: int, node_number: int, samples: list[AccSample]) -> bytes:
+     if len(samples) > 255:
+         raise ValueError("too many samples")
+     payload = bytearray()
+     payload.extend(bytes([node_type, node_number, len(samples)]))
+     for sample in samples:
+         payload.extend(_ACC_SAMPLE_STRUCT.pack(sample.cc, sample.ax, sample.ay, sample.az))
+     return pack_frame(MsgType.DATA, bytes(payload))
+
+
+ def parse_acc_batch(payload: bytes) -> AccBatch:
+     if len(payload) < 3:
+         raise ValueError("invalid data payload length")
+     node_type = payload[0]
+     node_number = payload[1]
+     count = payload[2]
+     expected = 3 + count * _ACC_SAMPLE_STRUCT.size
+     if len(payload) != expected:
+         raise ValueError(f"invalid data payload length {len(payload)}")
+     samples: list[AccSample] = []
+     offset = 3
+     for _ in range(count):
+         cc, ax, ay, az = _ACC_SAMPLE_STRUCT.unpack_from(payload, offset)
+         samples.append(AccSample(cc=cc, ax=ax, ay=ay, az=az))
+         offset += _ACC_SAMPLE_STRUCT.size
+     return AccBatch(node_type=node_type, node_number=node_number, samples=samples)
+
+
+ def build_pps(node_type: int, node_number: int, cc: int, epoch: int) -> bytes:
+     payload = bytes([node_type, node_number]) + _PPS_STRUCT.pack(cc, epoch)
+     return pack_frame(MsgType.PPS, payload)
+
+
+ def parse_pps(payload: bytes) -> PpsReport:
+     if len(payload) != 18:
+         raise ValueError(f"invalid PPS payload length {len(payload)}")
+     node_type = payload[0]
+     node_number = payload[1]
+     cc, epoch = _PPS_STRUCT.unpack(payload[2:])
+     return PpsReport(node_type=node_type, node_number=node_number, cc=cc, epoch=epoch)
+
+
+ def format_mac(mac: bytes) -> str:
+     return ":".join(f"{b:02X}" for b in mac)
+
+
+ def build_set_mode(mode: Mode) -> bytes:
+     return build_command(CommandId.SET_MODE, bytes([int(mode)]))
+
+
+ def build_start_daq() -> bytes:
+     return build_command(CommandId.START_DAQ)
+
+
+ def build_stop_daq() -> bytes:
+     return build_command(CommandId.STOP_DAQ)
+
+
+ def build_realtime_stream(toggle: Toggle) -> bytes:
+     command_id = (
+         CommandId.START_REALTIME_STREAM if toggle == Toggle.START else CommandId.STOP_REALTIME_STREAM
+     )
+     return build_command(command_id)
+
+
+ def build_past_stream(toggle: Toggle, start_ms: Optional[int] = None, end_ms: Optional[int] = None) -> bytes:
+     if toggle == Toggle.START:
+         if start_ms is None or end_ms is None:
+             raise ValueError("start_ms and end_ms required when starting past stream")
+         payload = struct.pack(">QQ", int(start_ms), int(end_ms))
+         return build_command(CommandId.START_PAST_STREAM, payload)
+     return build_command(CommandId.STOP_PAST_STREAM)
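The wire format above is a 3-byte header (1-byte message type, 2-byte big-endian payload length) followed by the payload. A minimal round-trip sketch, assuming the package is importable as `enode_host`:

    from enode_host import protocol

    # COMMAND frame: type 0x10, length 0x0002, then command id + mode byte.
    frame = protocol.build_set_daq_mode(protocol.Mode.REALTIME)
    assert frame == b"\x10\x00\x02\x0b\x00"

    # ACK round-trip: build on one side, parse the payload on the other.
    ack_frame = protocol.build_ack(protocol.MsgType.COMMAND, 0, "ok")
    ack = protocol.parse_ack(ack_frame[3:])  # skip the 3-byte frame header
    assert ack.status == 0 and ack.message == "ok"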
@@ -0,0 +1,139 @@
1
+ # PSD/ASD CALCULATION CLASS: RECURSIVE VERSION
2
+
3
+ # AUTHOR:
4
+ # K Y KOO (k.y.koo@exeter.ac.uk)
5
+ #
6
+ # HISTORY
7
+ # - Initial version v1 on 12 Sep 2024
8
+ #
9
+
10
+
11
+ from numpy import exp, empty, sqrt, mean, zeros, random, sqrt, arange, sin, stack, pi, append
12
+ from scipy.signal import windows
13
+ from scipy.fft import fft
14
+ from matplotlib.pyplot import figure, clf, semilogy, grid, xlabel, ylabel, legend
15
+
16
+
17
+ class PSD_Recursive():
18
+
19
+ def __init__(self, nfft, fs):
20
+
21
+ self.nfft = nfft
22
+ self.freq = arange(0, fs/2, fs/nfft)
23
+ self.dt = 1/fs
24
+ self.X = empty((0, 3), dtype=float)
25
+ self.Xf = zeros((nfft, 3), dtype=float)
26
+ self.window = windows.hann(self.nfft) / sqrt(mean(windows.hann(self.nfft)**2))
27
+ self.Lambda = 0.01
28
+ self.w = [exp(-self.Lambda), 1 - exp(-self.Lambda)] # exponential weighting factor
29
+ self.n = 1 # No of terms used to get the weighted averaged PSD
30
+ self.isUpdated = False
31
+ self.last_sample_time = None
32
+ self.last_update_time = None
33
+ self.sample_dt = self.dt
34
+
35
+ def push(self, x, ts=None):
36
+
37
+ # append
38
+ self.X = append(self.X, x, axis = 0)
39
+ # print(ts[-1])
40
+ if ts is not None and len(ts) >= 1:
41
+ # print(ts[-1])
42
+ self.last_sample_time = ts[-1]
43
+ if len(ts) >= 2:
44
+ self.sample_dt = (ts[-1] - ts[-2]).total_seconds()
45
+
46
+ # if len() > nfft, perform fft
47
+ while self.X.shape[0] > self.nfft:
48
+
49
+ x_ = self.X[:self.nfft, :].T * self.window
50
+ x_f = fft(x_).T
51
+ # add the result to the Xf with a forgetting factor exp(-lambda)
52
+ weights = self.get_weights()
53
+ self.Xf = self.Xf * weights[0] + abs(x_f)**2 * weights[1]
54
+ self.X = self.X[self.nfft:, :]
55
+ self.isUpdated = True
56
+ if ts is not None and len(ts) > 0:
57
+ self.last_update_time = ts[-1]
58
+ # pass
59
+
60
+ def get_weights(self):
61
+
62
+ # (n-1)/n * exp(-lambda) , 1 - (n-1)/n*exp(-lambda)
63
+ weights = [(self.n - 1)/self.n * exp(-self.Lambda), 1 - (self.n - 1)/self.n * exp(-self.Lambda)]
64
+ self.n += 1
65
+ return weights
66
+
67
+ def get_psd(self):
68
+
69
+ self.isUpdated = False
70
+ return self.freq, self.Xf[:int(self.nfft/2), :] * self.dt * 2 / self.nfft
71
+
72
+ def get_asd(self):
73
+
74
+ f, psd = self.get_psd()
75
+ return f, sqrt(psd)
76
+
77
+ def seconds_until_next_update(self):
78
+ # Estimate time until enough samples for next FFT window (data-time based).
79
+ remaining = max(self.nfft - self.X.shape[0], 0)
80
+ dt = self.sample_dt if self.sample_dt else self.dt
81
+ return remaining * dt
82
+
83
+ def set_fs(self, fs):
84
+ try:
85
+ fs_val = float(fs)
86
+ except (TypeError, ValueError):
87
+ return
88
+ if fs_val <= 0:
89
+ return
90
+ self.freq = arange(0, fs_val / 2, fs_val / self.nfft)
91
+ self.dt = 1 / fs_val
92
+ self.X = empty((0, 3), dtype=float)
93
+ self.Xf = zeros((self.nfft, 3), dtype=float)
94
+ self.window = windows.hann(self.nfft) / sqrt(mean(windows.hann(self.nfft) ** 2))
95
+ self.n = 1
96
+ self.isUpdated = False
97
+ self.last_sample_time = None
98
+ self.last_update_time = None
99
+ self.sample_dt = self.dt
100
+
101
+
102
+
103
+
104
+
105
+
106
+ if __name__ == '__main__':
107
+
108
+ rng = random.default_rng()
109
+
110
+ fs = 10e3
111
+ N = 1e5
112
+ amp = 2*sqrt(2)
113
+ freq = 1234.0
114
+ noise_power = 0.001 * fs / 2
115
+ time = arange(N) / fs
116
+ x = amp*sin(2*pi*freq*time)
117
+ x += rng.normal(scale=sqrt(noise_power), size=time.shape)
118
+ y = amp*sin(2*pi*1000*time)
119
+ z = amp*sin(2*pi*2000*time)
120
+ y += rng.normal(scale=sqrt(noise_power), size=time.shape)
121
+ z += rng.normal(scale=sqrt(noise_power), size=time.shape)
122
+
123
+ X = stack((x, y, z), axis = 0).T
124
+
125
+ NFFT = 1024
126
+
127
+ psd = PSD_Recursive(NFFT, fs)
128
+ for i in range(int(X.shape[0]/2)):
129
+ psd.push(X[2*i:2*(i+1), :])
130
+
131
+ figure()
132
+ clf()
133
+ f, psd = psd.get_psd()
134
+
135
+ semilogy(f, psd, label=['x', 'y', 'z'])
136
+ grid(True)
137
+ xlabel('Frequency (Hz)')
138
+ ylabel('PSD (g^2/Hz)')
139
+ legend()
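For reference, `get_weights` interpolates between a plain running mean (n = 1 gives weights (0, 1)) and pure exponential forgetting as n grows. A small standalone sketch of that schedule:

    from numpy import exp

    # Reproduce the PSD_Recursive weight schedule for the first few windows.
    # With Lambda = 0.01 the (old, new) weights start at (0, 1) and approach
    # (exp(-Lambda), 1 - exp(-Lambda)), i.e. an exponentially weighted
    # moving average of the |FFT|^2 periodograms.
    Lambda = 0.01
    for n in range(1, 6):
        w_old = (n - 1) / n * exp(-Lambda)
        print(n, round(w_old, 4), round(1 - w_old, 4))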
enode_host/queues.py ADDED
@@ -0,0 +1,11 @@
+ from queue import Queue
+
+ try:
+     from . import config
+ except ImportError:
+     import config
+
+ Mesh2GUI = Queue(maxsize=1000)
+ Mesh2Storage = Queue(maxsize=1000)
+ RemoCons = [Queue(maxsize=10) for _ in range(config.NNODES)]
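A hypothetical usage sketch: `Mesh2GUI`/`Mesh2Storage` fan data out to consumers, while `RemoCons` holds one command queue per node. The `n - 1` index mapping below is an assumption, based on node numbers being 1-based elsewhere in the package:

    from enode_host import queues

    # Queue a START_DAQ frame for node 1 (RemoCons index 0 assumed for node 1).
    queues.RemoCons[0].put(b"\x10\x00\x01\x01")

    # Consumers drain the shared queues; get() blocks until an item arrives.
    # item = queues.Mesh2Storage.get()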
@@ -0,0 +1,206 @@
+ import datetime
+ import logging
+
+ import numpy as np
+
+ logger = logging.getLogger(__name__)
+
+
+ class Resampling:
+     def __init__(self, SamplingFrequency):
+         self.Fs = SamplingFrequency  # Hz
+         self.Ts = int(1000/SamplingFrequency)  # msec
+         self.t0 = -1
+         self.t1 = -1
+         self.tk = -1
+         self.y0 = []
+         self.y1 = []
+
+     def get_fs(self):
+         logger.info("fs={}".format(self.Fs))
+
+     def set_fs(self, sampling_frequency):
+         try:
+             fs = float(sampling_frequency)
+         except (TypeError, ValueError):
+             return
+         if fs <= 0:
+             return
+         self.Fs = fs
+         self.Ts = int(1000 / fs)
+         # Reset state to avoid mixing the old timing grid with the new Fs
+         self.t0 = -1
+         self.t1 = -1
+         self.tk = -1
+         self.y0 = []
+         self.y1 = []
+
+     @staticmethod
+     def _as_datetime(value):
+         if isinstance(value, datetime.datetime):
+             return value
+         if hasattr(value, "to_pydatetime"):
+             try:
+                 return value.to_pydatetime()
+             except Exception:
+                 pass
+         if isinstance(value, np.datetime64):
+             try:
+                 ts = value.astype("datetime64[us]").astype("int64") / 1e6
+                 return datetime.datetime.fromtimestamp(ts, tz=datetime.timezone.utc)
+             except Exception:
+                 return None
+         try:
+             return datetime.datetime.fromtimestamp(float(value), tz=datetime.timezone.utc)
+         except Exception:
+             return None
+
+     def _coerce_state_time(self, value):
+         if value == -1:
+             return -1
+         if isinstance(value, datetime.datetime):
+             return value
+         coerced = self._as_datetime(value)
+         return coerced if coerced is not None else -1
+
+     def get_next_grid_time(self, t):
+         if not isinstance(t, datetime.datetime):
+             t = self._as_datetime(t)
+         if t is None:
+             return None
+         tz = t.tzinfo or datetime.timezone.utc
+         epoch = int(t.timestamp())
+         usec = t.microsecond
+         usec_k = (int(usec / (self.Ts * 1000)) + 1) * (self.Ts * 1000)
+         if usec_k >= 1000000:
+             epoch += 1
+             usec_k -= 1000000
+         tk = datetime.datetime.fromtimestamp(epoch, tz=tz)
+         tk = tk.replace(microsecond=usec_k)
+         return tk
+
+     def push(self, t, y):
+         t_rs = []
+         y_rs = []
+         for t_, y_ in zip(t, y):
+             t_dt = t_ if isinstance(t_, datetime.datetime) else self._as_datetime(t_)
+             if t_dt is None:
+                 continue
+             t_ = t_dt
+             self.t0 = self._coerce_state_time(self.t0)
+             self.t1 = self._coerce_state_time(self.t1)
+             self.tk = self._coerce_state_time(self.tk)
+             if self.t0 == -1:
+                 self.t0 = t_
+                 self.y0 = y_
+                 self.tk = self.get_next_grid_time(t_)
+                 if self.tk is None:
+                     self.t0 = -1
+                     continue
+                 # starting tk logging disabled per request
+             elif self.tk != -1 and t_ < self.tk:
+                 self.t0 = t_
+                 self.y0 = y_
+             else:
+                 self.t1 = t_
+                 self.y1 = y_
+                 dt = (self.t1 - self.t0).total_seconds()
+                 if dt <= 0:
+                     self.t0 = self.t1
+                     self.y0 = self.y1
+                     continue
+                 # linearly interpolate onto each grid time inside (t0, t1)
+                 while self.tk != -1 and self.tk < self.t1:
+                     dt0 = (self.tk - self.t0).total_seconds()
+                     dt1 = (self.t1 - self.tk).total_seconds()
+                     yk = [y0i * dt1/dt + y1i * dt0/dt for y0i, y1i in zip(self.y0, self.y1)]
+                     y_rs.append(yk)
+                     t_rs.append(self.tk)
+                     self.tk = self.get_next_grid_time(self.tk)
+                     if self.tk is None:
+                         self.tk = -1
+                         break
+                 self.t0 = self.t1
+                 self.y0 = self.y1
+
+         return t_rs, y_rs
+
+
+ if __name__ == '__main__':
+
+     # TEST 1
+     # a = Resampling(100)
+     # b = Resampling(200)
+     # a.get_fs()
+     # b.get_fs()
+
+     # TEST 2
+     # buf = {}
+     # buf[1] = Resampling(10)
+     # buf[2] = Resampling(20)
+     # for key in buf.keys():
+     #     buf[key].get_fs()
+
+     # TEST 3
+     # if 0:
+     #     from numpy import sort, random, hstack, sin, cos, pi, array
+     #     from matplotlib.pyplot import figure, plot, show
+     #     xi = sort(random.rand(210, 1) * 0.5, axis=0)
+     #     yi = hstack((sin(2*pi*1*xi), cos(2*pi*1*xi)))
+     #     Fs = 70
+     #     TK = []
+     #     VK = []
+     #     resampler = Resampling(Fs)
+     #     for idx in range(int(xi.shape[0]/2)):
+     #         t_, v_ = resampler.push(xi[2*idx:2*idx + 1, 0], list(yi[2*idx:2*idx + 1, :]))
+     #         for idx, t in enumerate(t_):
+     #             TK.append(t_[idx])
+     #             VK.append(v_[idx])
+     #     TK = array(TK)
+     #     VK = array(VK)
+     #     figure(1)
+     #     plot(xi, yi, '.')
+     #     plot(TK, VK, 'o-')
+     #     show()
+
+     # TEST 4
+     if 1:
+         from matplotlib.pyplot import figure, plot, show
+
+         T = []
+         Y = []
+
+         resampler = Resampling(62.5)
+         t = [datetime.datetime(2024, 1, 1, 0, 0, 0, i*1000) for i in range(0, 100, 20)]
+         y = [[j, j+1] for j in range(5)]
+
+         tr, yr = resampler.push(t, y)
+         T += tr
+         Y += yr
+
+         for t_ in T:
+             print(t_)
+
+         figure(1)
+         plot(t, y, '.')
+         plot(T, Y, 'o-')
+         show()
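As a quick check of the grid logic, `get_next_grid_time` rounds up to the next Ts-millisecond tick within the second. A minimal sketch (the module path is an assumption; the hunk above does not name the file):

    import datetime

    from enode_host.resampling import Resampling  # module name assumed

    r = Resampling(62.5)  # Ts = int(1000 / 62.5) = 16 ms grid
    t = datetime.datetime(2024, 1, 1, 0, 0, 0, 5000)  # 5 ms past the second
    print(r.get_next_grid_time(t).microsecond)  # -> 16000: the next 16 ms tick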
@@ -0,0 +1,47 @@
+ import threading
+ import logging
+ import time
+
+ try:
+     from . import queues
+ except ImportError:
+     import queues
+ try:
+     from . import resampling
+ except ImportError:
+     import resampling
+ try:
+     from . import storage
+ except ImportError:
+     import storage
+
+ logger = logging.getLogger(__name__)
+
+ Resamplers = {}
+ Storage = storage.Storage(600)  # 600 sec of history
+
+
+ def update():
+     # This loop:
+     # - reads items from the Mesh2Storage queue,
+     # - resamples each measurement onto the node's timing grid, and
+     # - pushes the resampled data into storage.
+     while True:
+         if not queues.Mesh2Storage.empty():
+             item = queues.Mesh2Storage.get()
+             nodeID = item['nodeID']  # starting from 1, representing ACC1
+             t = item['t']
+             y = item['acc']
+             if nodeID not in Resamplers:
+                 Resamplers[nodeID] = resampling.Resampling(62.5)
+             t_rs, y_rs = Resamplers[nodeID].push(t, y)
+             Storage.push(nodeID, t_rs, y_rs)
+         else:
+             time.sleep(0.01)
+
+
+ def begin():
+     # Start a background thread running the update() loop.
+     thread = threading.Thread(target=update)
+     thread.start()
+     logger.info("sigproc update started.")
+ logger.info("sigproc update started.")