acoular 24.7-py3-none-any.whl → 25.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- acoular/__init__.py +21 -9
- acoular/aiaa/__init__.py +12 -0
- acoular/{tools → aiaa}/aiaa.py +26 -31
- acoular/base.py +332 -0
- acoular/calib.py +129 -34
- acoular/configuration.py +13 -11
- acoular/demo/__init__.py +1 -0
- acoular/demo/acoular_demo.py +30 -17
- acoular/deprecation.py +85 -0
- acoular/environments.py +38 -24
- acoular/fastFuncs.py +90 -84
- acoular/fbeamform.py +342 -387
- acoular/fprocess.py +376 -0
- acoular/grids.py +122 -150
- acoular/h5cache.py +29 -40
- acoular/h5files.py +2 -6
- acoular/microphones.py +50 -59
- acoular/process.py +771 -0
- acoular/sdinput.py +35 -21
- acoular/signals.py +120 -113
- acoular/sources.py +208 -234
- acoular/spectra.py +59 -254
- acoular/tbeamform.py +280 -280
- acoular/tfastfuncs.py +21 -21
- acoular/tools/__init__.py +3 -7
- acoular/tools/helpers.py +218 -4
- acoular/tools/metrics.py +5 -5
- acoular/tools/utils.py +116 -0
- acoular/tprocess.py +416 -741
- acoular/traitsviews.py +15 -13
- acoular/trajectory.py +7 -10
- acoular/version.py +2 -2
- {acoular-24.7.dist-info → acoular-25.1.dist-info}/METADATA +63 -21
- acoular-25.1.dist-info/RECORD +56 -0
- {acoular-24.7.dist-info → acoular-25.1.dist-info}/WHEEL +1 -1
- acoular-24.7.dist-info/RECORD +0 -50
- {acoular-24.7.dist-info → acoular-25.1.dist-info}/licenses/AUTHORS.rst +0 -0
- {acoular-24.7.dist-info → acoular-25.1.dist-info}/licenses/LICENSE +0 -0
acoular/process.py
ADDED
@@ -0,0 +1,771 @@
# ------------------------------------------------------------------------------
# Copyright (c) Acoular Development Team.
# ------------------------------------------------------------------------------
"""General purpose blockwise processing methods independent of the domain (time or frequency).

.. autosummary::
    :toctree: generated/

    Average
    Cache
    SampleSplitter
    TimeAverage
    TimeCache
    SamplesBuffer
"""

import threading
from collections import deque
from inspect import currentframe
from warnings import warn

import numpy as np
from traits.api import Any, Array, Bool, Dict, Enum, Instance, Int, Property, Union, cached_property, on_trait_change

# acoular imports
from .base import Generator, InOut
from .configuration import config
from .deprecation import deprecated_alias
from .h5cache import H5cache
from .h5files import H5CacheFileBase
from .internal import digest
from .tools.utils import find_basename


class LockedGenerator:
    """Creates a thread-safe iterator.

    Takes an iterator/generator and makes it thread-safe by
    serializing calls to the `next` method of the given iterator/generator.
    """

    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __next__(self):
        with self.lock:
            return self.it.__next__()

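# --- Editor's illustrative sketch (not part of the released file) -------------
# LockedGenerator serializes `next` calls, so several threads can safely share a
# single source generator. A minimal usage sketch with the classes shown above:
#
#     import threading
#     import numpy as np
#     import acoular as ac
#
#     ts = ac.TimeSamples(data=np.random.rand(1024, 2), sample_freq=51200)
#     safe_gen = LockedGenerator(ts.result(64))  # 1024 / 64 = 16 blocks in total
#     blocks = []
#
#     def consume():
#         try:
#             while True:
#                 blocks.append(next(safe_gen))  # serialized by the internal lock
#         except StopIteration:
#             pass
#
#     workers = [threading.Thread(target=consume) for _ in range(2)]
#     for w in workers:
#         w.start()
#     for w in workers:
#         w.join()
#     # every block is handed to exactly one worker; len(blocks) == 16
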
@deprecated_alias({'naverage': 'num_per_average', 'numsamples': 'num_samples'}, read_only=['numsamples'])
class Average(InOut):
    """Calculates the average across consecutive time samples or frequency snapshots.

    The average operation is performed differently depending on the source type.
    If the source is a time domain source
    (e.g. derived from :class:`~acoular.base.SamplesGenerator`), the average is
    calculated over a certain number of time samples given by :attr:`num_per_average`.
    If the source is a frequency domain source (e.g. derived from
    :class:`~acoular.base.SpectraGenerator`), the average is calculated over a certain
    number of snapshots given by :attr:`num_per_average`.

    Examples
    --------
    To estimate the RMS of a white noise (time-domain) signal, the average of the squared
    signal can be calculated:

    >>> import acoular as ac
    >>> import numpy as np
    >>>
    >>> signal = ac.WNoiseGenerator(sample_freq=51200, num_samples=51200, rms=2.0).signal()
    >>> ts = ac.TimeSamples(data=signal[:, np.newaxis], sample_freq=51200)
    >>> tp = ac.TimePower(source=ts)
    >>> avg = ac.Average(source=tp, num_per_average=512)
    >>> mean_squared_value = next(avg.result(num=1))
    >>> rms = np.sqrt(mean_squared_value)[0, 0]
    >>> print(rms)
    1.9985200025816718

    Here, each evaluation of the generator created by the :meth:`result` method of the
    :class:`Average` object via the :meth:`next` function returns :code:`num=1` average across a
    snapshot of 512 samples.

    If the source is a frequency domain source, the average is calculated over a certain number
    of snapshots, defined by :attr:`num_per_average`.

    >>> fft = ac.RFFT(source=ts, block_size=64)
    >>> ps = ac.AutoPowerSpectra(source=fft)
    >>> avg = ac.Average(source=ps, num_per_average=16)
    >>> mean_power = next(avg.result(num=1))
    >>> print(np.sqrt(mean_power.sum()))
    2.0024960894399295

    Here, the generator created by the :meth:`result` method of the :class:`Average` object
    returns the average across 16 snapshots in the frequency domain.

    """

    #: Number of samples (time domain source) or snapshots (frequency domain source)
    #: to average over, defaults to 64.
    num_per_average = Int(64, desc='number of samples/snapshots to average over')

    #: Sampling frequency of the output signal, is set automatically.
    sample_freq = Property(depends_on=['source.sample_freq', 'num_per_average'])

    #: Number of samples (time domain) or snapshots (frequency domain) of the output signal.
    #: Is set automatically.
    num_samples = Property(depends_on=['source.num_samples', 'num_per_average'])

    # internal identifier
    digest = Property(depends_on=['source.digest', 'num_per_average'])

    @cached_property
    def _get_digest(self):
        return digest(self)

    @cached_property
    def _get_sample_freq(self):
        if self.source:
            return 1.0 * self.source.sample_freq / self.num_per_average
        return None

    @cached_property
    def _get_num_samples(self):
        if self.source:
            return self.source.num_samples / self.num_per_average
        return None

    def result(self, num):
        """Python generator that yields the output block-wise.

        Parameters
        ----------
        num : integer
            This parameter defines the size of the blocks to be yielded
            (i.e. the number of samples per block).

        Returns
        -------
        Average of the output of source.
        Yields samples in blocks of shape (num, num_channels).
        The last block may be shorter than num.

        """
        nav = self.num_per_average
        for temp in self.source.result(num * nav):
            ns, nc = temp.shape
            nso = int(ns / nav)
            if nso > 0:
                yield temp[: nso * nav].reshape((nso, -1, nc)).mean(axis=1)

class Cache(InOut):
    """Caches source output in cache file.

    This class is used to cache the output of a :class:`acoular.base.Generator` derived source
    object in a cache file to circumvent time-consuming re-calculation.
    The cache file is created in the Acoular cache directory.

    Examples
    --------
    >>> import acoular as ac
    >>> import numpy as np
    >>>
    >>> ac.config.h5library = 'tables'
    >>> data = np.random.rand(1024, 1)
    >>> ts = ac.TimeSamples(data=data, sample_freq=51200)
    >>> fft = ac.RFFT(source=ts, block_size=1024)
    >>> cache = ac.Cache(source=fft)  # cache the output of the FFT in cache file
    >>> for block in cache.result(num=1):  # read the cached data block-wise
    ...     print(block.shape)
    [('void_cache.h5', 1)]
    (1, 513)

    The caching behaviour can be controlled by the :class:`~acoular.configuration.Config` instance
    via the :attr:`~acoular.configuration.Config.global_caching` attribute.
    To turn off caching, set :attr:`~acoular.configuration.Config.global_caching` to 'none' before
    running the code. The cache file directory can be obtained (and set) via the
    :attr:`~acoular.configuration.Config.cache_dir` attribute.

    >>> ac.config.global_caching = 'none'

    """

    # basename for cache
    basename = Property(depends_on=['digest'])

    # hdf5 cache file
    h5f = Instance(H5CacheFileBase, transient=True)

    # internal identifier
    digest = Property(depends_on=['source.digest'])

    @cached_property
    def _get_digest(self):
        return digest(self)

    @cached_property
    def _get_basename(self):
        return find_basename(self.source)

    def _pass_data(self, num):
        yield from self.source.result(num)

    def _write_data_to_cache(self, num):
        nodename = 'tc_' + self.digest
        for i, data in enumerate(self.source.result(num)):
            if i == 0:
                self.h5f.create_extendable_array(nodename, (0, data.shape[1]), data.dtype.name)
                ac = self.h5f.get_data_by_reference(nodename)
                self.h5f.set_node_attribute(ac, 'sample_freq', self.sample_freq)
                self.h5f.set_node_attribute(ac, 'complete', False)
            self.h5f.append_data(ac, data)
            self.h5f.flush()
            yield data
        self.h5f.set_node_attribute(ac, 'complete', True)

    def _get_data_from_cache(self, num):
        nodename = 'tc_' + self.digest
        ac = self.h5f.get_data_by_reference(nodename)
        i = 0
        while i < ac.shape[0]:
            yield ac[i : i + num]
            i += num

    def _get_data_from_incomplete_cache(self, num):
        nodename = 'tc_' + self.digest
        ac = self.h5f.get_data_by_reference(nodename)
        i = 0
        nblocks = 0
        while i + num <= ac.shape[0]:
            yield ac[i : i + num]
            nblocks += 1
            i += num
        self.h5f.remove_data(nodename)
        for j, data in enumerate(self.source.result(num)):
            if j == 0:
                self.h5f.create_extendable_array(nodename, (0, data.shape[1]), data.dtype.name)
                ac = self.h5f.get_data_by_reference(nodename)
                self.h5f.set_node_attribute(ac, 'sample_freq', self.sample_freq)
                self.h5f.set_node_attribute(ac, 'complete', False)
            self.h5f.append_data(ac, data)
            if j >= nblocks:
                self.h5f.flush()
                yield data
        self.h5f.set_node_attribute(ac, 'complete', True)

    # result generator: delivers input, possibly from cache
    def result(self, num):
        """Python generator that yields the output from cache block-wise.

        Parameters
        ----------
        num : integer
            This parameter defines the size of the blocks to be yielded
            (i.e. the number of samples per block).

        Returns
        -------
        Samples in blocks of shape (num, num_channels).
        The last block may be shorter than num.
        Echoes the source output, but reads it from cache
        when available and prevents unnecessary recalculation.

        """
        if config.global_caching == 'none':
            generator = self._pass_data
        else:
            nodename = 'tc_' + self.digest
            H5cache.get_cache_file(self, self.basename)
            if not self.h5f:
                generator = self._pass_data
            elif self.h5f.is_cached(nodename):
                generator = self._get_data_from_cache
                if config.global_caching == 'overwrite':
                    self.h5f.remove_data(nodename)
                    generator = self._write_data_to_cache
                elif not self.h5f.get_data_by_reference(nodename).attrs.__contains__('complete'):
                    if config.global_caching == 'readonly':
                        generator = self._pass_data
                    else:
                        generator = self._get_data_from_incomplete_cache
                elif not self.h5f.get_data_by_reference(nodename).attrs['complete']:
                    if config.global_caching == 'readonly':
                        warn(
                            f"Cache file is incomplete for nodename {nodename}. "
                            "With config.global_caching='readonly', the cache file will not be used!",
                            Warning,
                            stacklevel=1,
                        )
                        generator = self._pass_data
                    else:
                        generator = self._get_data_from_incomplete_cache
            elif not self.h5f.is_cached(nodename):
                generator = self._write_data_to_cache
                if config.global_caching == 'readonly':
                    generator = self._pass_data
        yield from generator(num)

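# --- Editor's illustrative sketch (not part of the released file) -------------
# How Cache.result() above reacts to the global caching mode. Only the values that
# appear in the code are listed; `some_generator` is a placeholder for any
# Generator-derived source:
#
#     import acoular as ac
#
#     cache = ac.Cache(source=some_generator)
#
#     ac.config.global_caching = 'none'       # bypass the cache and pass data through
#     ac.config.global_caching = 'readonly'   # read complete cached nodes, never write or update
#     ac.config.global_caching = 'overwrite'  # drop an existing node and re-cache the data
#
#     for block in cache.result(num=128):     # blocks come from the cache file when available
#         pass
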
class SampleSplitter(InOut):
    """
    Distributes data from a source to several following objects in a block-wise manner.

    The `SampleSplitter` class is designed to take data from a single
    :class:`~acoular.base.Generator` derived source object and distribute it to multiple
    :class:`~acoular.base.Generator` derived objects. For each object, the :class:`SampleSplitter`
    holds a virtual block buffer from which the subsequently connected objects receive data in a
    first-in-first-out (FIFO) manner. This allows for efficient data handling and processing in
    parallel.

    Examples
    --------
    Consider a time domain source signal stream from which the FFT spectra and the signal power
    are calculated block-wise and in parallel by using the :class:`~acoular.fprocess.RFFT` as well
    as the :class:`~acoular.tprocess.TimePower` and :class:`~acoular.process.Average`
    objects. The `SampleSplitter` object is used to distribute the incoming blocks of data to the
    `RFFT` and `TimePower` object buffers whenever one of these objects calls the :meth:`result`
    generator.
    For the `TimePower` object, the buffer size is set to 10 blocks. If the buffer is full, an error
    is raised since the buffer overflow treatment is set to 'error'. For the `RFFT` object, the
    block buffer size is set to 1 block, and the buffer overflow treatment is set to 'none'. This
    is done to reduce latency in the FFT calculation, as the FFT calculation may take longer than
    the signal power calculation. If new data is available and the block buffer for the `RFFT`
    object is full, the `SampleSplitter` will drop the oldest block of data in the buffer. Thus, the
    `RFFT` object will always receive the most recent block of data.

    >>> import acoular as ac
    >>> import numpy as np
    >>>
    >>> # create a time domain signal source
    >>> ts = ac.TimeSamples(data=np.random.rand(1024, 1), sample_freq=51200)
    >>>
    >>> # create the sample splitter object
    >>> ss = ac.SampleSplitter(source=ts)
    >>>
    >>> # create the FFT spectra and further objects that receive the data
    >>> fft = ac.RFFT(source=ss, block_size=64)
    >>> pow = ac.TimePower(source=ss)
    >>> avg = ac.Average(source=pow, num_per_average=64)
    >>>
    >>> # register the subsequent processing block objects at the sample splitter
    >>> ss.register_object(fft, buffer_size=1, buffer_overflow_treatment='none')
    >>> ss.register_object(pow, buffer_size=10, buffer_overflow_treatment='error')

    After object registration, the `SampleSplitter` object is ready to distribute the data to the
    object buffers. The block buffers can be accessed via the `block_buffer` attribute of the
    `SampleSplitter` object.

    >>> ss.block_buffer.values()
    dict_values([deque([], maxlen=1), deque([], maxlen=10)])

    Calling the result method of the FFT object will start the data collection and distribution
    process.

    >>> generator = fft.result(num=1)
    >>> fft_res = next(generator)

    Although we haven't called the result method of the signal power object, one data block is
    already available in the buffer.

    >>> print(len(ss.block_buffer[pow]))
    1

    To remove registered objects from the `SampleSplitter`, use the :meth:`remove_object` method.

    >>> ss.remove_object(pow)
    >>> print(len(ss.block_buffer))
    1
    """

    #: dictionary with block buffers (dict values) of registered objects (dict
    #: keys).
    block_buffer = Dict(key_trait=Instance(Generator))

    #: max elements/blocks in block buffers.
    #: Can be set individually for each registered object.
    #: Default is 100 blocks for each registered object.
    buffer_size = Union(
        Int,
        Dict(key_trait=Instance(Generator), value_trait=Int),
        default_value=100,
    )

    #: defines behaviour in case of block_buffer overflow. Can be set individually
    #: for each registered object.
    #:
    #: * 'error': an IOError is thrown by the class
    #: * 'warning': a warning is displayed. Possibly leads to lost blocks of data
    #: * 'none': nothing happens. Possibly leads to lost blocks of data
    buffer_overflow_treatment = Dict(
        key_trait=Instance(Generator),
        value_trait=Enum('error', 'warning', 'none'),
        desc='defines buffer overflow behaviour.',
    )

    # shadow trait to monitor if source delivers samples or is empty
    _source_generator_exist = Bool(False)

    # shadow trait to monitor if buffer of objects with overflow treatment = 'error'
    # or 'warning' is overfilled. Error will be raised in all threads.
    _buffer_overflow = Bool(False)

    # Helper Trait holds source generator
    _source_generator = Instance(LockedGenerator)

    def _create_block_buffer(self, obj, buffer_size=None):
        if buffer_size is None:
            buffer_size = self.buffer_size if isinstance(self.buffer_size, int) else self.buffer_size[obj]
        self.block_buffer[obj] = deque([], maxlen=buffer_size)

    def _create_buffer_overflow_treatment(self, obj, buffer_overflow_treatment=None):
        if buffer_overflow_treatment is None:
            buffer_overflow_treatment = 'error'
        self.buffer_overflow_treatment[obj] = buffer_overflow_treatment

    def _clear_block_buffer(self, obj):
        self.block_buffer[obj].clear()

    def _remove_block_buffer(self, obj):
        del self.block_buffer[obj]

    def _remove_buffer_overflow_treatment(self, obj):
        del self.buffer_overflow_treatment[obj]

    def _assert_obj_registered(self, obj):
        if obj not in self.block_buffer:
            msg = f'calling object {obj} is not registered.'
            raise OSError(msg)

    def _get_objs_to_inspect(self):
        return [obj for obj in self.buffer_overflow_treatment if self.buffer_overflow_treatment[obj] != 'none']

    def _inspect_buffer_levels(self, inspect_objs):
        for obj in inspect_objs:
            if len(self.block_buffer[obj]) == self.buffer_size:
                if self.buffer_overflow_treatment[obj] == 'error':
                    self._buffer_overflow = True
                elif self.buffer_overflow_treatment[obj] == 'warning':
                    warn(f'overfilled buffer for object: {obj} data will get lost', UserWarning, stacklevel=1)

    def _create_source_generator(self, num):
        for obj in self.block_buffer:
            self._clear_block_buffer(obj)
        self._buffer_overflow = False  # reset overflow bool
        self._source_generator = LockedGenerator(self.source.result(num))
        self._source_generator_exist = True  # indicates full generator

    def _fill_block_buffers(self):
        next_block = next(self._source_generator)
        [self.block_buffer[obj].appendleft(next_block) for obj in self.block_buffer]

    @on_trait_change('buffer_size')
    def _change_buffer_size(self):
        for obj in self.block_buffer:
            self._remove_block_buffer(obj)
            self._create_block_buffer(obj)

    def register_object(self, *objects_to_register, buffer_size=None, buffer_overflow_treatment=None):
        """Register one or multiple :class:`~acoular.base.Generator` objects to the SampleSplitter.

        Creates a block buffer for each object and sets the buffer size and buffer
        overflow treatment.

        Parameters
        ----------
        objects_to_register : Generator
            One or multiple :class:`~acoular.base.Generator` derived objects to be registered.
        buffer_size : int, optional
            Maximum number of elements/blocks in block buffer. If not set, the default buffer size
            of 100 blocks is used.
        buffer_overflow_treatment : str, optional
            Defines the behaviour in case of reaching the buffer size.
            Can be set individually for each object. Possible values are 'error', 'warning', and
            'none'. If not set, the default value is 'error'.
        """
        for obj in objects_to_register:
            if obj not in self.block_buffer:
                self._create_block_buffer(obj, buffer_size)
                self._create_buffer_overflow_treatment(obj, buffer_overflow_treatment)
            else:
                msg = f'object {obj} is already registered.'
                raise OSError(msg)

    def remove_object(self, *objects_to_remove):
        """Function that can be used to remove registered objects.

        If no objects are given, all registered objects are removed.

        Parameters
        ----------
        objects_to_remove : list
            One or multiple :class:`~acoular.base.Generator` derived objects to be removed.
            If not set, all registered objects are removed.
        """
        if not objects_to_remove:
            objects_to_remove = list(self.block_buffer.keys())
        for obj in objects_to_remove:
            self._remove_block_buffer(obj)
            self._remove_buffer_overflow_treatment(obj)

    def result(self, num):
        """Python generator that yields the output block-wise from block-buffer.

        Parameters
        ----------
        num : integer
            This parameter defines the size of the blocks to be yielded
            (i.e. the number of samples per block).

        Returns
        -------
        Samples in blocks of shape (num, num_channels).
        Delivers a block of samples to the calling object.
        The last block may be shorter than num.

        """
        calling_obj = currentframe().f_back.f_locals['self']
        self._assert_obj_registered(calling_obj)
        objs_to_inspect = self._get_objs_to_inspect()

        if not self._source_generator_exist:
            self._create_source_generator(num)

        while not self._buffer_overflow:
            if self.block_buffer[calling_obj]:
                yield self.block_buffer[calling_obj].pop()
            else:
                self._inspect_buffer_levels(objs_to_inspect)
                try:
                    self._fill_block_buffers()
                except StopIteration:
                    self._source_generator_exist = False
                    return
        else:
            msg = 'Maximum size of block buffer is reached!'
            raise OSError(msg)

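# --- Editor's illustrative sketch (not part of the released file) -------------
# The block buffers above are meant to be drained from separate threads: each
# branch iterates its own result() generator, and SampleSplitter hands every
# source block to all registered consumers. The parameters mirror the class
# docstring example; the thread handling shown here is an assumption for
# illustration only.
#
#     import threading
#     import numpy as np
#     import acoular as ac
#
#     ts = ac.TimeSamples(data=np.random.rand(4096, 2), sample_freq=51200)
#     ss = ac.SampleSplitter(source=ts)
#     fft = ac.RFFT(source=ss, block_size=64)
#     tp = ac.TimePower(source=ss)
#     avg = ac.Average(source=tp, num_per_average=64)
#     ss.register_object(fft, tp)  # register the objects that read directly from ss
#
#     results = {'fft': [], 'avg': []}
#
#     def drain(name, obj):
#         for block in obj.result(num=1):
#             results[name].append(block)
#
#     threads = [threading.Thread(target=drain, args=('fft', fft)),
#                threading.Thread(target=drain, args=('avg', avg))]
#     for t in threads:
#         t.start()
#     for t in threads:
#         t.join()
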
class TimeAverage(Average):
    """Calculates average of the signal (Alias for :class:`acoular.process.Average`).

    .. deprecated:: 24.10
        Using :class:`~acoular.process.TimeAverage` is deprecated and will be removed in Acoular
        version 25.07. Use :class:`~acoular.process.Average` instead.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warn(
            'Using TimeAverage is deprecated and will be removed in Acoular version 25.07. Use Average instead.',
            DeprecationWarning,
            stacklevel=2,
        )

class TimeCache(Cache):
    """Caches source signals in cache file (Alias for :class:`acoular.process.Cache`).

    .. deprecated:: 24.10
        Using :class:`~acoular.process.TimeCache` is deprecated and will be removed in Acoular
        version 25.07. Use :class:`~acoular.process.Cache` instead.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warn(
            'Using TimeCache is deprecated and will be removed in Acoular version 25.07. Use Cache instead.',
            DeprecationWarning,
            stacklevel=2,
        )

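# --- Editor's illustrative sketch (not part of the released file) -------------
# Migrating away from the deprecated aliases above (removal announced for version
# 25.07). `tp` is a placeholder for any Generator-derived source:
#
#     avg = ac.TimeAverage(source=tp)   # deprecated, emits a DeprecationWarning
#     avg = ac.Average(source=tp)       # replacement
#
#     tc = ac.TimeCache(source=avg)     # deprecated, emits a DeprecationWarning
#     tc = ac.Cache(source=avg)         # replacement
#
# The @deprecated_alias decorator on Average additionally maps the old keyword
# names 'naverage' -> 'num_per_average' and 'numsamples' -> 'num_samples'.
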
class SamplesBuffer(InOut):
    """Handles buffering of samples from a source.

    This class is used to buffer samples from a source and provide them in blocks
    of a specified size. There are several use cases for this class, as demonstrated in
    the following.

    Examples
    --------
    Let us assume we want to draw blocks of 16 samples from our source, but we want to make sure
    that we always have twice the number of samples in the buffer. We can achieve this simple
    behaviour by using the following code:

    >>> import acoular as ac
    >>> import numpy as np
    >>> # create a white noise source with 512 samples
    >>> source = ac.TimeSamples(
    ...     data=ac.WNoiseGenerator(
    ...         sample_freq=64,
    ...         num_samples=512,
    ...     ).signal()[:, np.newaxis],
    ...     sample_freq=64,
    ... )
    >>> # create a buffer with a size of 32 samples
    >>> buffer = ac.process.SamplesBuffer(source=source, length=32)
    >>> # get the first block of 16 samples
    >>> block = next(buffer.result(num=16))
    >>> np.testing.assert_array_equal(block, source.data[:16])

    Here, on the first call to the result method, the buffer will fill up by collecting blocks of
    the same size from the source. The buffer will then return the first block of 16 samples. On the
    next call to the result method, the buffer will be filled again and returns the next block of 16
    samples.

    In some cases, we might want to draw a different number of samples from the source than we want
    to return. This can be achieved by setting the `source_num` trait of the buffer. A special case
    is the return of a variable number of samples. This is the case, for example, in the class
    :class:`~acoular.tbeamform.BeamformerTimeTraj`, in which a different number of time samples is
    required from the buffer for further delay-and-sum processing depending on the expected delay,
    which can vary for moving sources. At the same time, however, only 'num' samples should be
    written to and removed from the buffer. This behavior can be achieved by setting the
    `shift_index_by` trait to 'num' and by setting the `result_num` trait to the number of samples
    that should be returned by the result function.

    >>> buffer = ac.process.SamplesBuffer(source=source, length=32, result_num=20, shift_index_by='num')
    >>> block_sizes = []
    >>> block_sizes.append(
    ...     next(buffer.result(num=16)).shape[0]
    ... )  # this time, the buffer will return 20 samples, but the buffer will only forget the first 16 samples
    >>> buffer.result_num = 24
    >>> block_sizes.append(
    ...     next(buffer.result(num=16)).shape[0]
    ... )  # this time, the buffer will return 24 samples, but the buffer will only forget the first 16 samples
    >>> np.testing.assert_array_equal(block_sizes, [20, 24])
    """  # noqa: W505

    #: number of samples that fit in the buffer
    length = Int(desc='number of samples that fit in the buffer')

    #: number of samples per block to obtain from the source. If 'None', use 'num' argument of
    #: result method
    source_num = Union(
        None,
        Int(),
        default_value=None,
        desc='number of samples to return from the source. If "None", use "num" argument of result method',
    )

    #: number of samples to return from the buffer. If 'None', use 'num' argument of result method
    result_num = Union(
        None,
        Int(),
        default_value=None,
        desc="number of samples to return from the buffer. If 'None', use 'num' argument of result method",
    )

    #: index shift value for the buffer. If "result_num", buffer will return and forget 'result_num'
    #: samples. If "num", buffer will return 'result_num' samples but will forget 'num' samples
    shift_index_by = Enum(
        ('result_num', 'num'),
        desc=(
            'index shift value for the buffer. If "result_num", use "result_num" trait.'
            ' If "num", use "num" argument of result method'
        ),
    )

    #: current filling level of buffer
    level = Property(desc='current filling level of buffer')

    #: data type of the buffer elements
    dtype = Any(desc='data type of the buffer')

    # flag to indicate that the source is empty, for internal use
    _empty_source = Bool(False, desc='flag to indicate that the source is empty')

    # the buffer for processing
    _buffer = Array(shape=(None, None), desc='buffer for block processing')

    # current index in buffer
    _index = Int(desc='current index in buffer')

    def _get_level(self):
        return self._buffer.shape[0] - self._index

    def _create_new_buffer(self):
        self._buffer = np.zeros((self.length, self.num_channels), dtype=self.dtype)
        self._index = self.length
        self._empty_source = False

    def _write_to_buffer(self, data):
        ns = data.shape[0]
        self._buffer[0 : (self.length - ns)] = self._buffer[-(self.length - ns) :]
        self._buffer[-ns:, :] = data.astype(self.dtype)
        self._index -= ns

    def increase_buffer(self, num):
        """Increase the buffer by 'num' samples.

        Returns
        -------
        None
        """
        ar = np.zeros((num, self.num_channels), dtype=self._buffer.dtype)
        self._buffer = np.concatenate((ar, self._buffer), axis=0)
        self._index += num
        self.length += num

    def read_from_buffer(self, num):
        """Read samples from the buffer.

        Parameters
        ----------
        num : int
            number of samples to read from the buffer.

        Returns
        -------
        numpy.ndarray
            block of samples from the buffer

        """
        rnum = num if self.result_num is None else self.result_num
        rnum = rnum if self.level >= rnum else self.level
        data = self._buffer[self._index : self._index + rnum]
        if self.shift_index_by == 'result_num':
            self._index += rnum
        else:
            self._index += num
        return data

    def fill_buffer(self, snum):
        """Fill the buffer with samples from the source.

        Parameters
        ----------
        snum : int
            number of samples to return from the source.

        Yields
        ------
        None
        """
        source_generator = self.source.result(snum)
        while not self._empty_source:
            while self._index >= snum:
                if self.result_num is not None:
                    while self.result_num > self.length:
                        self.increase_buffer(snum)
                try:
                    self._write_to_buffer(next(source_generator))
                except StopIteration:
                    self._empty_source = True
                    break
            yield

    def result(self, num):
        """Return blocks of samples from the buffer.

        Parameters
        ----------
        num : int
            number of samples to return.

        Yields
        ------
        numpy.ndarray
            block of samples from the buffer
        """
        self._create_new_buffer()
        snum = num
        if self.source_num is not None:
            snum = self.source_num
        for _ in self.fill_buffer(snum):
            if self.level > 0:
                yield self.read_from_buffer(num)
            else:
                break
        while self.level > 0:
            yield self.read_from_buffer(num)