acoular-24.7-py3-none-any.whl → acoular-25.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- acoular/__init__.py +21 -9
- acoular/aiaa/__init__.py +12 -0
- acoular/{tools → aiaa}/aiaa.py +26 -31
- acoular/base.py +332 -0
- acoular/calib.py +129 -34
- acoular/configuration.py +13 -11
- acoular/demo/__init__.py +1 -0
- acoular/demo/acoular_demo.py +30 -17
- acoular/deprecation.py +85 -0
- acoular/environments.py +38 -24
- acoular/fastFuncs.py +90 -84
- acoular/fbeamform.py +342 -387
- acoular/fprocess.py +376 -0
- acoular/grids.py +122 -150
- acoular/h5cache.py +29 -40
- acoular/h5files.py +2 -6
- acoular/microphones.py +50 -59
- acoular/process.py +771 -0
- acoular/sdinput.py +35 -21
- acoular/signals.py +120 -113
- acoular/sources.py +208 -234
- acoular/spectra.py +59 -254
- acoular/tbeamform.py +280 -280
- acoular/tfastfuncs.py +21 -21
- acoular/tools/__init__.py +3 -7
- acoular/tools/helpers.py +218 -4
- acoular/tools/metrics.py +5 -5
- acoular/tools/utils.py +116 -0
- acoular/tprocess.py +416 -741
- acoular/traitsviews.py +15 -13
- acoular/trajectory.py +7 -10
- acoular/version.py +2 -2
- {acoular-24.7.dist-info → acoular-25.1.dist-info}/METADATA +63 -21
- acoular-25.1.dist-info/RECORD +56 -0
- {acoular-24.7.dist-info → acoular-25.1.dist-info}/WHEEL +1 -1
- acoular-24.7.dist-info/RECORD +0 -50
- {acoular-24.7.dist-info → acoular-25.1.dist-info}/licenses/AUTHORS.rst +0 -0
- {acoular-24.7.dist-info → acoular-25.1.dist-info}/licenses/LICENSE +0 -0
acoular/fbeamform.py
CHANGED
@@ -60,7 +60,6 @@ from numpy import (
     integer,
     invert,
     isscalar,
-    linalg,
     log10,
     ndarray,
     newaxis,
@@ -68,12 +67,12 @@ from numpy import (
     pi,
     real,
     reshape,
-    round,
+    round,  # noqa: A004
     searchsorted,
     sign,
     size,
     sqrt,
-    sum,
+    sum,  # noqa: A004
     tile,
     trace,
     tril,
@@ -82,9 +81,8 @@ from numpy import (
     zeros,
     zeros_like,
 )
-from numpy.linalg import norm
 from packaging.version import parse
-from scipy.linalg import eigh, eigvals, fractional_matrix_power, inv
+from scipy.linalg import eigh, eigvals, fractional_matrix_power, inv, norm
 from scipy.optimize import fmin_l_bfgs_b, linprog, nnls, shgo
 from sklearn.linear_model import LassoLars, LassoLarsCV, LassoLarsIC, LinearRegression, OrthogonalMatchingPursuitCV
 from traits.api import (
@@ -94,13 +92,12 @@ from traits.api import (
     Dict,
     Enum,
     Float,
-    HasPrivateTraits,
+    HasStrictTraits,
     Instance,
     Int,
     List,
     Property,
     Range,
-    Trait,
     Tuple,
     cached_property,
     on_trait_change,
@@ -108,7 +105,9 @@ from traits.api import (
 )
 from traits.trait_errors import TraitError

+# acoular imports
 from .configuration import config
+from .deprecation import deprecated_alias
 from .environments import Environment
 from .fastFuncs import beamformerFreq, calcPointSpreadFunction, calcTransfer, damasSolverGaussSeidel
 from .grids import Grid, Sector
@@ -117,25 +116,29 @@ from .h5files import H5CacheFileBase
 from .internal import digest
 from .microphones import MicGeom
 from .spectra import PowerSpectra
+from .tfastfuncs import _steer_I, _steer_II, _steer_III, _steer_IV

 sklearn_ndict = {}
 if parse(sklearn.__version__) < parse('1.4'):
-    sklearn_ndict['normalize'] = False
+    sklearn_ndict['normalize'] = False  # pragma: no cover

-BEAMFORMER_BASE_DIGEST_DEPENDENCIES = ['freq_data.digest', 'r_diag', 'r_diag_norm', 'precision', '
+BEAMFORMER_BASE_DIGEST_DEPENDENCIES = ['freq_data.digest', 'r_diag', 'r_diag_norm', 'precision', 'steer.digest']


-class SteeringVector(
-    """Basic class for implementing steering vectors with monopole source transfer models.
+class SteeringVector(HasStrictTraits):
+    """Basic class for implementing steering vectors with monopole source transfer models.
+
+    Handles four different steering vector formulations. See :cite:`Sarradj2012` for details.
+    """

     #: :class:`~acoular.grids.Grid`-derived object that provides the grid locations.
-    grid =
+    grid = Instance(Grid, desc='beamforming grid')

     #: :class:`~acoular.microphones.MicGeom` object that provides the microphone locations.
-    mics =
+    mics = Instance(MicGeom, desc='microphone geometry')

-    #: Type of steering vectors, see also :
-    steer_type =
+    #: Type of steering vectors, see also :cite:`Sarradj2012`. Defaults to 'true level'.
+    steer_type = Enum('true level', 'true location', 'classic', 'inverse', desc='type of steering vectors used')

     #: :class:`~acoular.environments.Environment` or derived object,
     #: which provides information about the sound propagation in the medium.
@@ -160,6 +163,26 @@ class SteeringVector(HasPrivateTraits):
     #: Defaults to [0.,0.,0.].
     ref = Property(desc='reference position or distance')

+    _steer_funcs_freq = Dict(
+        {
+            'classic': lambda x: x / absolute(x) / x.shape[-1],
+            'inverse': lambda x: 1.0 / x.conj() / x.shape[-1],
+            'true level': lambda x: x / einsum('ij,ij->i', x, x.conj())[:, newaxis],
+            'true location': lambda x: x / sqrt(einsum('ij,ij->i', x, x.conj()) * x.shape[-1])[:, newaxis],
+        },
+        desc='dictionary of frequency domain steering vector functions',
+    )
+
+    _steer_funcs_time = Dict(
+        {
+            'classic': _steer_I,
+            'inverse': _steer_II,
+            'true level': _steer_III,
+            'true location': _steer_IV,
+        },
+        desc='dictionary of time domain steering vector functions',
+    )
+
     def _set_ref(self, ref):
         if isscalar(ref):
             try:
@@ -180,17 +203,17 @@ class SteeringVector(HasPrivateTraits):
     # internal identifier, use for inverse methods, excluding steering vector type
     inv_digest = Property(depends_on=['env.digest', 'grid.digest', 'mics.digest', '_ref'])

-    @property_depends_on('grid.digest, env.digest, _ref')
+    @property_depends_on(['grid.digest', 'env.digest', '_ref'])
     def _get_r0(self):
         if isscalar(self.ref):
             if self.ref > 0:
                 return full((self.grid.size,), self.ref)
             return self.env._r(self.grid.pos())
-        return self.env._r(self.grid.pos
+        return self.env._r(self.grid.pos, self.ref[:, newaxis])

-    @property_depends_on('grid.digest, mics.digest, env.digest')
+    @property_depends_on(['grid.digest', 'mics.digest', 'env.digest'])
     def _get_rm(self):
-        return atleast_2d(self.env._r(self.grid.pos
+        return atleast_2d(self.env._r(self.grid.pos, self.mics.pos))

     @cached_property
     def _get_digest(self):
@@ -231,8 +254,9 @@ class SteeringVector(HasPrivateTraits):
         return trans

     def steer_vector(self, f, ind=None):
-        """Calculates the steering vectors based on the transfer function
-
+        """Calculates the steering vectors based on the transfer function.
+
+        See also :cite:`Sarradj2012`.

         Parameters
         ----------
@@ -249,12 +273,7 @@ class SteeringVector(HasPrivateTraits):
            array of shape (ngridpts, nmics) containing the steering vectors for the given frequency

        """
-        func =
-            'classic': lambda x: x / absolute(x) / x.shape[-1],
-            'inverse': lambda x: 1.0 / x.conj() / x.shape[-1],
-            'true level': lambda x: x / einsum('ij,ij->i', x, x.conj())[:, newaxis],
-            'true location': lambda x: x / sqrt(einsum('ij,ij->i', x, x.conj()) * x.shape[-1])[:, newaxis],
-        }[self.steer_type]
+        func = self._steer_funcs_freq[self.steer_type]
         return func(self.transfer(f, ind))

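The new private `_steer_funcs_freq` dictionary replaces the per-call dictionary that `steer_vector` used to build, so the four formulations from Sarradj (2012) are defined once on the class. Below is a minimal NumPy-only sketch of those four formulations applied to a toy transfer matrix; the shape convention (ngridpts x nmics) is assumed from the docstring above.

    # Standalone sketch of the four frequency-domain steering vector formulations
    # mirrored from SteeringVector._steer_funcs_freq (toy data, not acoular itself).
    import numpy as np

    rng = np.random.default_rng(1)
    transfer = rng.normal(size=(5, 8)) + 1j * rng.normal(size=(5, 8))  # toy transfer functions

    steer_funcs = {
        'classic': lambda x: x / np.absolute(x) / x.shape[-1],
        'inverse': lambda x: 1.0 / x.conj() / x.shape[-1],
        'true level': lambda x: x / np.einsum('ij,ij->i', x, x.conj())[:, np.newaxis],
        'true location': lambda x: x / np.sqrt(np.einsum('ij,ij->i', x, x.conj()) * x.shape[-1])[:, np.newaxis],
    }

    for name, func in steer_funcs.items():
        h = func(transfer)           # steering vectors, one row per grid point
        print(name, h.shape)         # (5, 8) for every formulation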
@@ -286,104 +305,17 @@ class LazyBfResult:
         return self.bf._ac.__getitem__(key)


-class BeamformerBase(
+class BeamformerBase(HasStrictTraits):
     """Beamforming using the basic delay-and-sum algorithm in the frequency domain."""

     # Instance of :class:`~acoular.fbeamform.SteeringVector` or its derived classes
     # that contains information about the steering vector. This is a private trait.
     # Do not set this directly, use `steer` trait instead.
-
-
-    #: :class:`~acoular.fbeamform.SteeringVector` or derived object.
-    #: Defaults to :class:`~acoular.fbeamform.SteeringVector` object.
-    steer = Property(desc='steering vector object')
-
-    def _get_steer(self):
-        return self._steer_obj
-
-    def _set_steer(self, steer):
-        if isinstance(steer, SteeringVector):
-            self._steer_obj = steer
-        elif steer in ('true level', 'true location', 'classic', 'inverse'):
-            # Type of steering vectors, see also :ref:`Sarradj, 2012<Sarradj2012>`.
-            warn(
-                "Deprecated use of 'steer' trait. Please use object of class 'SteeringVector' in the future.",
-                Warning,
-                stacklevel=2,
-            )
-            self._steer_obj.steer_type = steer
-        else:
-            raise (TraitError(args=self, name='steer', info='SteeringVector', value=steer))
-
-    # --- List of backwards compatibility traits and their setters/getters -----------
-
-    # :class:`~acoular.environments.Environment` or derived object.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    env = Property()
-
-    def _get_env(self):
-        return self._steer_obj.env
-
-    def _set_env(self, env):
-        warn("Deprecated use of 'env' trait. ", Warning, stacklevel=2)
-        self._steer_obj.env = env
-
-    # The speed of sound.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    c = Property()
-
-    def _get_c(self):
-        return self._steer_obj.env.c
-
-    def _set_c(self, c):
-        warn("Deprecated use of 'c' trait. ", Warning, stacklevel=2)
-        self._steer_obj.env.c = c
-
-    # :class:`~acoular.grids.Grid`-derived object that provides the grid locations.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    grid = Property()
-
-    def _get_grid(self):
-        return self._steer_obj.grid
-
-    def _set_grid(self, grid):
-        warn("Deprecated use of 'grid' trait. ", Warning, stacklevel=2)
-        self._steer_obj.grid = grid
-
-    # :class:`~acoular.microphones.MicGeom` object that provides the microphone locations.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait
-    mpos = Property()
-
-    def _get_mpos(self):
-        return self._steer_obj.mics
-
-    def _set_mpos(self, mpos):
-        warn("Deprecated use of 'mpos' trait. ", Warning, stacklevel=2)
-        self._steer_obj.mics = mpos
-
-    # Sound travel distances from microphone array center to grid points (r0)
-    # and all array mics to grid points (rm). Readonly.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait
-    r0 = Property()
-
-    def _get_r0(self):
-        return self._steer_obj.r0
-
-    rm = Property()
-
-    def _get_rm(self):
-        return self._steer_obj.rm
-
-    # --- End of backwards compatibility traits --------------------------------------
+    steer = Instance(SteeringVector, args=())

     #: :class:`~acoular.spectra.PowerSpectra` object that provides the
     #: cross spectral matrix and eigenvalues
-    freq_data =
+    freq_data = Instance(PowerSpectra, desc='freq data object')

     #: Boolean flag, if 'True' (default), the main diagonal is removed before beamforming.
     r_diag = Bool(True, desc='removal of diagonal')
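With `BeamformerBase` now a `HasStrictTraits` class, the pass-through traits `env`, `c`, `grid`, `mpos`, `r0` and `rm` are gone and everything is reached through the `steer` instance. A hedged setup sketch follows; the file names and the `file` traits of `MicGeom`/`TimeSamples` are assumptions for illustration, not taken from this diff.

    # Hedged sketch of the 25.1 object graph (file names and file traits assumed).
    import acoular as ac

    mics = ac.MicGeom(file='array_geometry.xml')        # microphone geometry (assumed file)
    grid = ac.RectGrid(x_min=-0.2, x_max=0.2, y_min=-0.2, y_max=0.2, z=0.3, increment=0.01)
    env = ac.Environment(c=343.0)
    ts = ac.TimeSamples(file='measurement.h5')           # assumed measurement file
    ps = ac.PowerSpectra(source=ts, block_size=128, window='Hanning')

    st = ac.SteeringVector(grid=grid, mics=mics, env=env, steer_type='true level')
    bb = ac.BeamformerBase(freq_data=ps, steer=st, r_diag=True)

    # env, c, grid and mpos are no longer traits of the beamformer itself;
    # access them through the steering vector instead:
    print(bb.steer.env.c, bb.steer.mics.num_mics)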
@@ -400,7 +332,7 @@ class BeamformerBase(HasPrivateTraits):
     )

     #: Floating point precision of property result. Corresponding to numpy dtypes. Default = 64 Bit.
-    precision =
+    precision = Enum('float64', 'float32', desc='precision (32/64 Bit) of result, corresponding to numpy dtypes')

     #: Boolean flag, if 'True' (default), the result is cached in h5 files.
     cached = Bool(True, desc='cached flag')
@@ -417,6 +349,12 @@ class BeamformerBase(HasPrivateTraits):
     # internal identifier
     digest = Property(depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES)

+    # private traits
+    _ac = Any(desc='beamforming result')
+    _fr = Any(desc='flag for beamforming result at frequency index')
+    _f = CArray(dtype='float64', desc='frequencies')
+    _numfreq = Int(desc='number of frequencies')
+
     @cached_property
     def _get_digest(self):
         return digest(self)
@@ -442,7 +380,7 @@ class BeamformerBase(HasPrivateTraits):
             # print("no data existent for nodename:", nodename)
             if config.global_caching == 'readonly':
                 return (None, None, None)
-            numfreq = self.freq_data.fftfreq().shape[0]
+            numfreq = self.freq_data.fftfreq().shape[0]
             group = self.h5f.create_new_group(nodename)
             self.h5f.create_compressible_array(
                 'freqs',
@@ -472,11 +410,12 @@ class BeamformerBase(HasPrivateTraits):
         return (ac, fr, gpos)

     def _assert_equal_channels(self):
-
-        if
-
+        num_channels = self.freq_data.num_channels
+        if num_channels != self.steer.mics.num_mics or num_channels == 0:
+            msg = f'{num_channels:d} channels do not fit {self.steer.mics.num_mics:d} mics'
+            raise ValueError(msg)

-    @property_depends_on('digest')
+    @property_depends_on(['digest'])
     def _get_result(self):
         """Implements the :attr:`result` getter routine.
         The beamforming result is either loaded or calculated.
@@ -516,7 +455,7 @@ class BeamformerBase(HasPrivateTraits):
         if not self.r_diag:  # Full CSM --> no normalization needed
             normfactor = 1.0
         elif self.r_diag_norm == 0.0:  # Removed diag: standard normalization factor
-            nMics = float(self.freq_data.
+            nMics = float(self.freq_data.num_channels)
             normfactor = nMics / (nMics - 1)
         elif self.r_diag_norm != 0.0:  # Removed diag: user defined normalization factor
             normfactor = self.r_diag_norm
@@ -533,7 +472,7 @@ class BeamformerBase(HasPrivateTraits):
            - Function for frequency-dependent steering vector calculation

        """
-        if type(self.steer)
+        if type(self.steer) is SteeringVector:  # for simple steering vector, use faster method
            param_type = self.steer.steer_type

            def param_steer_func(f):
@@ -607,7 +546,7 @@ class BeamformerBase(HasPrivateTraits):
        each grid point .
        Note that the frequency resolution and therefore the bandwidth
        represented by a single frequency line depends on
-        the :attr:`sampling frequency<acoular.
+        the :attr:`sampling frequency<acoular.base.SamplesGenerator.sample_freq>` and
        used :attr:`FFT block size<acoular.spectra.PowerSpectra.block_size>`.

        """
@@ -621,7 +560,7 @@ class BeamformerBase(HasPrivateTraits):
        ind = searchsorted(freq, f)
        if ind >= len(freq):
            warn(
-                'Queried frequency (
+                f'Queried frequency ({f:g} Hz) not in resolved frequency range. Returning zeros.',
                Warning,
                stacklevel=2,
            )
@@ -759,7 +698,10 @@ class BeamformerBase(HasPrivateTraits):


 class BeamformerFunctional(BeamformerBase):
-    """Functional beamforming
+    """Functional beamforming algorithm.
+
+    See :cite:`Dougherty2014` for details.
+    """

     #: Functional exponent, defaults to 1 (= Classic Beamforming).
     gamma = Float(1, desc='functional exponent')
@@ -767,7 +709,8 @@ class BeamformerFunctional(BeamformerBase):
     #: Functional Beamforming is only well defined for full CSM
     r_diag = Enum(False, desc='False, as Functional Beamformer is only well defined for the full CSM')

-    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since Functional
+    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since Functional
+    #: Beamforming is only well defined for full CSM.
     r_diag_norm = Enum(
         1.0,
         desc='No normalization needed. Functional Beamforming is only well defined for full CSM.',
@@ -802,14 +745,15 @@ class BeamformerFunctional(BeamformerBase):
         normfactor = self.sig_loss_norm()
         param_steer_type, steer_vector = self._beamformer_params()
         for i in ind:
-            if self.r_diag:
+            if self.r_diag:  # pragma: no cover
                 # This case is not used at the moment (see Trait r_diag)
                 # It would need some testing as structural changes were not tested...
                 # ==============================================================================
-                # One cannot use spectral decomposition when diagonal of csm is
-                # as the resulting modified eigenvectors are not
-                #
-                #
+                # One cannot use spectral decomposition when diagonal of csm is
+                # removed, as the resulting modified eigenvectors are not
+                # orthogonal to each other anymore. Therefore potentiating
+                # cannot be applied only to the eigenvalues. --> To avoid this
+                # the root of the csm (removed diag) is calculated directly.
                 # WATCH OUT: This doesn't really produce good results.
                 # ==============================================================================
                 csm = self.freq_data.csm[i]
@@ -845,13 +789,17 @@ class BeamformerFunctional(BeamformerBase):


 class BeamformerCapon(BeamformerBase):
-    """Beamforming using the Capon (Mininimum Variance) algorithm
+    """Beamforming using the Capon (Mininimum Variance) algorithm.
+
+    See :cite:`Capon1969` for details.
+    """

     # Boolean flag, if 'True', the main diagonal is removed before beamforming;
     # for Capon beamforming r_diag is set to 'False'.
     r_diag = Enum(False, desc='removal of diagonal')

-    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since Beamformer Capon
+    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since Beamformer Capon
+    #: is only well defined for full CSM.
     r_diag_norm = Enum(
         1.0,
         desc='No normalization. BeamformerCapon is only well defined for full CSM.',
@@ -876,22 +824,25 @@ class BeamformerCapon(BeamformerBase):

        """
        f = self._f
-        nMics = self.freq_data.
+        nMics = self.freq_data.num_channels
        normfactor = self.sig_loss_norm() * nMics**2
        param_steer_type, steer_vector = self._beamformer_params()
        for i in ind:
-            csm = array(
+            csm = array(inv(array(self.freq_data.csm[i], dtype='complex128')), order='C')
            beamformerOutput = beamformerFreq(param_steer_type, self.r_diag, normfactor, steer_vector(f[i]), csm)[0]
            self._ac[i] = 1.0 / beamformerOutput
            self._fr[i] = 1


 class BeamformerEig(BeamformerBase):
-    """Beamforming using eigenvalue and eigenvector techniques
+    """Beamforming using eigenvalue and eigenvector techniques.
+
+    See :cite:`Sarradj2005` for details.
+    """

     #: Number of component to calculate:
-    #: 0 (smallest) ... :attr:`~acoular.
-    #: defaults to -1, i.e.
+    #: 0 (smallest) ... :attr:`~acoular.base.SamplesGenerator.num_channels`-1;
+    #: defaults to -1, i.e. num_channels-1
     n = Int(-1, desc='No. of eigenvalue')

     # Actual component to calculate, internal, readonly.
@@ -904,7 +855,7 @@ class BeamformerEig(BeamformerBase):
     def _get_digest(self):
         return digest(self)

-    @property_depends_on('steer.mics, n')
+    @property_depends_on(['steer.mics', 'n'])
     def _get_na(self):
         na = self.n
         nm = self.steer.mics.num_mics
@@ -952,13 +903,17 @@ class BeamformerEig(BeamformerBase):


 class BeamformerMusic(BeamformerEig):
-    """Beamforming using the MUSIC algorithm
+    """Beamforming using the MUSIC algorithm.
+
+    See :cite:`Schmidt1986` for details.
+    """

     # Boolean flag, if 'True', the main diagonal is removed before beamforming;
     # for MUSIC beamforming r_diag is set to 'False'.
     r_diag = Enum(False, desc='removal of diagonal')

-    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since BeamformerMusic
+    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since BeamformerMusic
+    #: is only well defined for full CSM.
     r_diag_norm = Enum(
         1.0,
         desc='No normalization. BeamformerMusic is only well defined for full CSM.',
@@ -987,7 +942,7 @@ class BeamformerMusic(BeamformerEig):

        """
        f = self._f
-        nMics = self.freq_data.
+        nMics = self.freq_data.num_channels
        n = int(self.steer.mics.num_mics - self.na)
        normfactor = self.sig_loss_norm() * nMics**2
        param_steer_type, steer_vector = self._beamformer_params()
@@ -1005,7 +960,7 @@ class BeamformerMusic(BeamformerEig):
            self._fr[i] = 1


-class PointSpreadFunction(
+class PointSpreadFunction(HasStrictTraits):
     """The point spread function.

     This class provides tools to calculate the PSF depending on the used
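The eigenvalue-based variants keep the same constructor pattern; only the channel count now comes from `freq_data.num_channels`. A short hedged sketch continuing the `ps` and `st` objects from the earlier example (the choice of `n` is illustrative).

    # Hedged sketch, reusing 'ps' and 'st' from the previous example.
    import acoular as ac

    be = ac.BeamformerEig(freq_data=ps, steer=st, n=-1)    # -1: largest eigenvalue component
    bm = ac.BeamformerMusic(freq_data=ps, steer=st, n=6)   # n treated as assumed number of sources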
@@ -1017,113 +972,30 @@ class PointSpreadFunction(HasPrivateTraits):
     # Instance of :class:`~acoular.fbeamform.SteeringVector` or its derived classes
     # that contains information about the steering vector. This is a private trait.
     # Do not set this directly, use `steer` trait instead.
-
-
-    #: :class:`~acoular.fbeamform.SteeringVector` or derived object.
-    #: Defaults to :class:`~acoular.fbeamform.SteeringVector` object.
-    steer = Property(desc='steering vector object')
-
-    def _get_steer(self):
-        return self._steer_obj
-
-    def _set_steer(self, steer):
-        if isinstance(steer, SteeringVector):
-            self._steer_obj = steer
-        elif steer in ('true level', 'true location', 'classic', 'inverse'):
-            # Type of steering vectors, see also :ref:`Sarradj, 2012<Sarradj2012>`.
-            warn(
-                "Deprecated use of 'steer' trait. Please use object of class 'SteeringVector' in the future.",
-                Warning,
-                stacklevel=2,
-            )
-            self._steer_obj = SteeringVector(steer_type=steer)
-        else:
-            raise (TraitError(args=self, name='steer', info='SteeringVector', value=steer))
-
-    # --- List of backwards compatibility traits and their setters/getters -----------
-
-    # :class:`~acoular.environments.Environment` or derived object.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    env = Property()
-
-    def _get_env(self):
-        return self._steer_obj.env
-
-    def _set_env(self, env):
-        warn("Deprecated use of 'env' trait. ", Warning, stacklevel=2)
-        self._steer_obj.env = env
-
-    # The speed of sound.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    c = Property()
-
-    def _get_c(self):
-        return self._steer_obj.env.c
-
-    def _set_c(self, c):
-        warn("Deprecated use of 'c' trait. ", Warning, stacklevel=2)
-        self._steer_obj.env.c = c
-
-    # :class:`~acoular.grids.Grid`-derived object that provides the grid locations.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    grid = Property()
-
-    def _get_grid(self):
-        return self._steer_obj.grid
-
-    def _set_grid(self, grid):
-        warn("Deprecated use of 'grid' trait. ", Warning, stacklevel=2)
-        self._steer_obj.grid = grid
-
-    # :class:`~acoular.microphones.MicGeom` object that provides the microphone locations.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait
-    mpos = Property()
-
-    def _get_mpos(self):
-        return self._steer_obj.mics
-
-    def _set_mpos(self, mpos):
-        warn("Deprecated use of 'mpos' trait. ", Warning, stacklevel=2)
-        self._steer_obj.mics = mpos
-
-    # Sound travel distances from microphone array center to grid points (r0)
-    # and all array mics to grid points (rm). Readonly.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait
-    r0 = Property()
-
-    def _get_r0(self):
-        return self._steer_obj.r0
-
-    rm = Property()
-
-    def _get_rm(self):
-        return self._steer_obj.rm
-
-    # --- End of backwards compatibility traits --------------------------------------
+    steer = Instance(SteeringVector, args=())

     #: Indices of grid points to calculate the PSF for.
     grid_indices = CArray(
         dtype=int,
         value=array([]),
         desc='indices of grid points for psf',
-    )  # value=array([]), value=self.grid.pos(),
+    )  # value=array([]), value=self.steer.grid.pos(),

     #: Flag that defines how to calculate and store the point spread function
     #: defaults to 'single'.
     #:
-    #: * 'full': Calculate the full PSF (for all grid points) in one go (should be used if the PSF
-    #:
-    #: * '
-    #:
-
+    #: * 'full': Calculate the full PSF (for all grid points) in one go (should be used if the PSF
+    #:   at all grid points is needed, as with :class:`DAMAS<BeamformerDamas>`)
+    #: * 'single': Calculate the PSF for the grid points defined by :attr:`grid_indices`, one by one
+    #:   (useful if not all PSFs are needed, as with :class:`CLEAN<BeamformerClean>`)
+    #: * 'block': Calculate the PSF for the grid points defined by :attr:`grid_indices`, in one go
+    #:   (useful if not all PSFs are needed, as with :class:`CLEAN<BeamformerClean>`)
+    #: * 'readonly': Do not attempt to calculate the PSF since it should already be cached (useful
+    #:   if multiple processes have to access the cache file)
+    calcmode = Enum('single', 'block', 'full', 'readonly', desc='mode of calculation / storage')

     #: Floating point precision of property psf. Corresponding to numpy dtypes. Default = 64 Bit.
-    precision =
+    precision = Enum('float64', 'float32', desc='precision (32/64 Bit) of result, corresponding to numpy dtypes')

     #: The actual point spread function.
     psf = Property(desc='point spread function')
@@ -1135,7 +1007,7 @@ class PointSpreadFunction(HasPrivateTraits):
     h5f = Instance(H5CacheFileBase, transient=True)

     # internal identifier
-    digest = Property(depends_on=['
+    digest = Property(depends_on=['steer.digest', 'precision'], cached=True)

     @cached_property
     def _get_digest(self):
@@ -1147,7 +1019,7 @@ class PointSpreadFunction(HasPrivateTraits):
        exist and global caching mode is 'readonly'.
        """
        filename = 'psf' + self.digest
-        nodename = ('Hz_
+        nodename = (f'Hz_{self.freq:.2f}').replace('.', '_')
        # print("get cachefile:", filename)
        H5cache.get_cache_file(self, filename)
        if not self.h5f:  # only happens in case of global caching readonly
@@ -1244,40 +1116,46 @@ class PointSpreadFunction(HasPrivateTraits):
        Parameters
        ----------
        ind : list of int
-            Indices of gridpoints which are assumed to be sources.
-
+            Indices of gridpoints which are assumed to be sources. Normalization factor for the
+            beamforming result (e.g. removal of diag is compensated with this.)

        Returns
        -------
        The psf [1, nGridPoints, len(ind)]
-
        """
-        if type(self.steer)
+        if type(self.steer) is SteeringVector:  # for simple steering vector, use faster method
            result = calcPointSpreadFunction(
                self.steer.steer_type,
                self.steer.r0,
                self.steer.rm,
-                2 * pi * self.freq / self.env.c,
+                2 * pi * self.freq / self.steer.env.c,
                ind,
                self.precision,
            )
-        else:
-            # there is a version of this in
+        else:
+            # for arbitrary steering sectors, use general calculation. there is a version of this in
+            # fastFuncs, may be used later after runtime testing and debugging
            product = dot(self.steer.steer_vector(self.freq).conj(), self.steer.transfer(self.freq, ind).T)
            result = (product * product.conj()).real
        return result


 class BeamformerDamas(BeamformerBase):
-    """DAMAS deconvolution
+    """DAMAS deconvolution algorithm.
+
+    See :cite:`Brooks2006` for details.
+    """

     #: (only for backward compatibility) :class:`BeamformerBase` object
     #: if set, provides :attr:`freq_data`, :attr:`steer`, :attr:`r_diag`
-    #: if not set, these have to be set explicitly
-    beamformer =
+    #: if not set, these have to be set explicitly.
+    beamformer = Property()
+
+    # private storage of beamformer instance
+    _beamformer = Instance(BeamformerBase)

     #: The floating-number-precision of the PSFs. Default is 64 bit.
-    psf_precision =
+    psf_precision = Enum('float64', 'float32', desc='precision of PSF')

     #: Number of iterations, defaults to 100.
     n_iter = Int(100, desc='number of iterations')
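`PointSpreadFunction` also drops the backwards-compatibility traits and now documents the four `calcmode` options explicitly. A hedged usage sketch continuing the `st` steering vector from above; the result shape noted in the comment is an assumption based on the docstring.

    # Hedged PSF sketch (reuses 'st' from the earlier setup example).
    import numpy as np
    import acoular as ac

    psf = ac.PointSpreadFunction(steer=st, freq=4000.0, calcmode='single')
    psf.grid_indices = np.array([0, 10, 20])   # compute PSFs only for these grid points
    p = psf.psf                                # assumed shape: (ngridpts, len(grid_indices))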
@@ -1287,18 +1165,34 @@ class BeamformerDamas(BeamformerBase):

     #: Flag that defines how to calculate and store the point spread function,
     #: defaults to 'full'. See :attr:`PointSpreadFunction.calcmode` for details.
-    calcmode =
+    calcmode = Enum('full', 'single', 'block', 'readonly', desc='mode of psf calculation / storage')

     # internal identifier
     digest = Property(
         depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES + ['n_iter', 'damp', 'psf_precision'],
     )

+    def _get_beamformer(self):
+        return self._beamformer
+
+    def _set_beamformer(self, beamformer):
+        msg = (
+            f"Deprecated use of 'beamformer' trait in class {self.__class__.__name__}. "
+            'Please set :attr:`freq_data`, :attr:`steer`, :attr:`r_diag` directly. '
+            "Using the 'beamformer' trait will be removed in version 25.07."
+        )
+        warn(
+            msg,
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self._beamformer = beamformer
+
     @cached_property
     def _get_digest(self):
         return digest(self)

-    @on_trait_change('
+    @on_trait_change('_beamformer.digest')
     def delegate_beamformer_traits(self):
         self.freq_data = self.beamformer.freq_data
         self.r_diag = self.beamformer.r_diag
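The `beamformer` trait of `BeamformerDamas` (and, per the hunks below, of `BeamformerOrth` and `BeamformerClean`) is now a deprecated property that only forwards to the private `_beamformer` and warns. A hedged migration sketch reusing `ps`, `st` and `bb` from the earlier examples:

    import acoular as ac

    # deprecated style: still accepted until 25.07, but emits a DeprecationWarning
    bd_old = ac.BeamformerDamas(beamformer=bb, n_iter=100)

    # preferred style in 25.1: set the traits directly
    bd_new = ac.BeamformerDamas(freq_data=ps, steer=st, r_diag=True, n_iter=100)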
@@ -1346,12 +1240,12 @@ class BeamformerDamas(BeamformerBase):
            self._fr[i] = 1


+@deprecated_alias({'max_iter': 'n_iter'})
 class BeamformerDamasPlus(BeamformerDamas):
-    """DAMAS deconvolution
-
-
-
-    Needs a-priori delay-and-sum beamforming (:class:`BeamformerBase`).
+    """DAMAS deconvolution :cite:`Brooks2006` for solving the system of equations, instead of the
+    original Gauss-Seidel iterations, this class employs the NNLS or linear programming solvers from
+    scipy.optimize or one of several optimization algorithms from the scikit-learn module. Needs
+    a-priori delay-and-sum beamforming (:class:`BeamformerBase`).
     """

     #: Type of fit method to be used ('LassoLars',
@@ -1359,7 +1253,7 @@ class BeamformerDamasPlus(BeamformerDamas):
     #: These methods are implemented in
     #: the `scikit-learn <http://scikit-learn.org/stable/user_guide.html>`_
     #: module or within scipy.optimize respectively.
-    method =
+    method = Enum('NNLS', 'LP', 'LassoLars', 'OMPCV', desc='method used for solving deconvolution problem')

     #: Weight factor for LassoLars method,
     #: defaults to 0.0.
@@ -1369,7 +1263,7 @@ class BeamformerDamasPlus(BeamformerDamas):
     #: Maximum number of iterations,
     #: tradeoff between speed and precision;
     #: defaults to 500
-
+    n_iter = Int(500, desc='maximum number of iterations')

     #: Unit multiplier for evaluating, e.g., nPa instead of Pa.
     #: Values are converted back before returning.
@@ -1379,7 +1273,7 @@ class BeamformerDamasPlus(BeamformerDamas):

     # internal identifier
     digest = Property(
-        depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES + ['alpha', 'method', '
+        depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES + ['alpha', 'method', 'n_iter', 'unit_mult'],
     )

     @cached_property
@@ -1439,19 +1333,17 @@ class BeamformerDamasPlus(BeamformerDamas):
                self._ac[i] = linprog(c=cT, A_ub=psf, b_ub=y).x / unit  # defaults to simplex method and non-negative x
            else:
                if self.method == 'LassoLars':
-                    model = LassoLars(
-                        alpha=self.alpha * unit,
-                        max_iter=self.max_iter,
-                    )
+                    model = LassoLars(alpha=self.alpha * unit, max_iter=self.n_iter, positive=True)
                elif self.method == 'OMPCV':
                    model = OrthogonalMatchingPursuitCV()
                else:
                    msg = f'Method {self.method} not implemented.'
                    raise NotImplementedError(msg)
                model.normalize = False
-                # from sklearn 1.2, normalize=True does not work the same way anymore and the
-                # with StandardScaler does scale in a different way, thus we
-                # ourselves to make results the same over
+                # from sklearn 1.2, normalize=True does not work the same way anymore and the
+                # pipeline approach with StandardScaler does scale in a different way, thus we
+                # monkeypatch the code and normalize ourselves to make results the same over
+                # different sklearn versions
                norms = norm(psf, axis=0)
                # get rid of annoying sklearn warnings that appear
                # for sklearn<1.2 despite any settings
@@ -1465,14 +1357,19 @@ class BeamformerDamasPlus(BeamformerDamas):


 class BeamformerOrth(BeamformerBase):
-    """Orthogonal deconvolution
+    """Orthogonal deconvolution algorithm.
+
+    See :cite:`Sarradj2010` for details.
     New faster implementation without explicit (:class:`BeamformerEig`).
     """

     #: (only for backward compatibility) :class:`BeamformerEig` object
     #: if set, provides :attr:`freq_data`, :attr:`steer`, :attr:`r_diag`
-    #: if not set, these have to be set explicitly
-    beamformer =
+    #: if not set, these have to be set explicitly.
+    beamformer = Property()
+
+    # private storage of beamformer instance
+    _beamformer = Instance(BeamformerEig)

     #: List of components to consider, use this to directly set the eigenvalues
     #: used in the beamformer. Alternatively, set :attr:`n`.
@@ -1489,11 +1386,27 @@ class BeamformerOrth(BeamformerBase):
         depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES + ['eva_list'],
     )

+    def _get_beamformer(self):
+        return self._beamformer
+
+    def _set_beamformer(self, beamformer):
+        msg = (
+            f"Deprecated use of 'beamformer' trait in class {self.__class__.__name__}. "
+            'Please set :attr:`freq_data`, :attr:`steer`, :attr:`r_diag` directly. '
+            "Using the 'beamformer' trait will be removed in version 25.07."
+        )
+        warn(
+            msg,
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self._beamformer = beamformer
+
     @cached_property
     def _get_digest(self):
         return digest(self)

-    @on_trait_change('
+    @on_trait_change('_beamformer.digest')
     def delegate_beamformer_traits(self):
         self.freq_data = self.beamformer.freq_data
         self.r_diag = self.beamformer.r_diag
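Several classes above now take `n_iter` and use `@deprecated_alias` to keep the old keyword (`max_iter`, or `n` for CLEAN-SC) working with a warning. A hedged usage sketch; the exact warning category and the keyword forwarding at construction time are assumptions based on the decorator's name.

    import warnings
    import acoular as ac

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        bdp = ac.BeamformerDamasPlus(freq_data=ps, steer=st, max_iter=200)  # old keyword

    print(bdp.n_iter)                    # expected to be 200, forwarded from 'max_iter'
    print([w.category for w in caught])  # a deprecation-style warning is expected here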
@@ -1523,7 +1436,7 @@ class BeamformerOrth(BeamformerBase):

        """
        f = self._f
-
+        num_channels = self.freq_data.num_channels
        normfactor = self.sig_loss_norm()
        param_steer_type, steer_vector = self._beamformer_params()
        for i in ind:
@@ -1537,18 +1450,21 @@ class BeamformerOrth(BeamformerBase):
                    steer_vector(f[i]),
                    (ones(1), eve[:, n].reshape((-1, 1))),
                )[0]
-                self._ac[i, beamformerOutput.argmax()] += eva[n] /
+                self._ac[i, beamformerOutput.argmax()] += eva[n] / num_channels
            self._fr[i] = 1


+@deprecated_alias({'n': 'n_iter'})
 class BeamformerCleansc(BeamformerBase):
-    """CLEAN-SC deconvolution
+    """CLEAN-SC deconvolution algorithm.
+
+    See :cite:`Sijtsma2007` for details.
     Classic delay-and-sum beamforming is already included.
     """

     #: no of CLEAN-SC iterations
-    #: defaults to 0, i.e. automatic (max 2*
-
+    #: defaults to 0, i.e. automatic (max 2*num_channels)
+    n_iter = Int(0, desc='no of iterations')

     #: iteration damping factor
     #: defaults to 0.6
@@ -1560,7 +1476,7 @@ class BeamformerCleansc(BeamformerBase):
     stopn = Int(3, desc='stop criterion index')

     # internal identifier
-    digest = Property(depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES + ['
+    digest = Property(depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES + ['n_iter', 'damp', 'stopn'])

     @cached_property
     def _get_digest(self):
@@ -1586,9 +1502,9 @@ class BeamformerCleansc(BeamformerBase):
        """
        f = self._f
        normfactor = self.sig_loss_norm()
-
+        num_channels = self.freq_data.num_channels
        result = zeros((self.steer.grid.size), 'f')
-        J =
+        J = num_channels * 2 if not self.n_iter else self.n_iter
        powers = zeros(J, 'd')

        param_steer_type, steer_vector = self._beamformer_params()
@@ -1615,7 +1531,8 @@ class BeamformerCleansc(BeamformerBase):
                hh = hh[:, newaxis]
                csm1 = hmax * (hh * hh.conj().T)

-                # h1 = self.steer._beamformerCall(f[i], self.r_diag, normfactor,
+                # h1 = self.steer._beamformerCall(f[i], self.r_diag, normfactor, \
+                # (array((hmax, ))[newaxis, :], hh[newaxis, :].conjugate()))[0]
                h1 = beamformerFreq(
                    param_steer_type,
                    self.r_diag,
@@ -1630,15 +1547,21 @@ class BeamformerCleansc(BeamformerBase):


 class BeamformerClean(BeamformerBase):
-    """CLEAN deconvolution
+    """CLEAN deconvolution algorithm.
+
+    See :cite:`Hoegbom1974` for details.
+    """

     #: (only for backward compatibility) :class:`BeamformerBase` object
     #: if set, provides :attr:`freq_data`, :attr:`steer`, :attr:`r_diag`
-    #: if not set, these have to be set explicitly
-    beamformer =
+    #: if not set, these have to be set explicitly.
+    beamformer = Property()
+
+    # private storage of beamformer instance
+    _beamformer = Instance(BeamformerBase)

     #: The floating-number-precision of the PSFs. Default is 64 bit.
-    psf_precision =
+    psf_precision = Enum('float64', 'float32', desc='precision of PSF.')

     # iteration damping factor
     # defaults to 0.6
@@ -1648,7 +1571,7 @@ class BeamformerClean(BeamformerBase):
     n_iter = Int(100, desc='maximum number of iterations')

     # how to calculate and store the psf
-    calcmode =
+    calcmode = Enum('block', 'full', 'single', 'readonly', desc='mode of psf calculation / storage')

     # internal identifier
     digest = Property(
@@ -1659,7 +1582,23 @@ class BeamformerClean(BeamformerBase):
     def _get_digest(self):
         return digest(self)

-
+    def _get_beamformer(self):
+        return self._beamformer
+
+    def _set_beamformer(self, beamformer):
+        msg = (
+            f"Deprecated use of 'beamformer' trait in class {self.__class__.__name__}. "
+            'Please set :attr:`freq_data`, :attr:`steer`, :attr:`r_diag` directly. '
+            "Using the 'beamformer' trait will be removed in version 25.07."
+        )
+        warn(
+            msg,
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self._beamformer = beamformer
+
+    @on_trait_change('_beamformer.digest')
     def delegate_beamformer_traits(self):
         self.freq_data = self.beamformer.freq_data
         self.r_diag = self.beamformer.r_diag
@@ -1727,9 +1666,12 @@ class BeamformerClean(BeamformerBase):
            self._fr[i] = 1


+@deprecated_alias({'max_iter': 'n_iter'})
 class BeamformerCMF(BeamformerBase):
-    """Covariance Matrix Fitting
+    """Covariance Matrix Fitting algorithm.
+
     This is not really a beamformer, but an inverse method.
+    See :cite:`Yardibi2008` for details.
     """

     #: Type of fit method to be used ('LassoLars', 'LassoLarsBIC',
@@ -1737,7 +1679,7 @@ class BeamformerCMF(BeamformerBase):
     #: These methods are implemented in
     #: the `scikit-learn <http://scikit-learn.org/stable/user_guide.html>`_
     #: module.
-    method =
+    method = Enum(
         'LassoLars',
         'LassoLarsBIC',
         'OMPCV',
@@ -1753,10 +1695,11 @@ class BeamformerCMF(BeamformerBase):
     #: (Use values in the order of 10^⁻9 for good results.)
     alpha = Range(0.0, 1.0, 0.0, desc='Lasso weight factor')

-    #:
+    #: Total or maximum number of iterations
+    #: (depending on :attr:`method`),
     #: tradeoff between speed and precision;
     #: defaults to 500
-
+    n_iter = Int(500, desc='maximum number of iterations')

     #: Unit multiplier for evaluating, e.g., nPa instead of Pa.
     #: Values are converted back before returning.
@@ -1764,7 +1707,8 @@ class BeamformerCMF(BeamformerBase):
     #: within fitting method algorithms. Defaults to 1e9.
     unit_mult = Float(1e9, desc='unit multiplier')

-    #: If True, shows the status of the PyLops solver. Only relevant in case of FISTA or
+    #: If True, shows the status of the PyLops solver. Only relevant in case of FISTA or
+    #: Split_Bregman
     show = Bool(False, desc='show output of PyLops solvers')

     #: Energy normalization in case of diagonal removal not implemented for inverse methods.
@@ -1779,7 +1723,7 @@ class BeamformerCMF(BeamformerBase):
            'freq_data.digest',
            'alpha',
            'method',
-            '
+            'n_iter',
            'unit_mult',
            'r_diag',
            'precision',
@@ -1825,8 +1769,8 @@ class BeamformerCMF(BeamformerBase):
            return vstack([matrix.real, matrix.imag])

        # prepare calculation
-        nc = self.freq_data.
-
+        nc = self.freq_data.num_channels
+        num_points = self.steer.grid.size
        unit = self.unit_mult

        for i in ind:
@@ -1836,7 +1780,7 @@ class BeamformerCMF(BeamformerBase):

            # reduced Kronecker product (only where solution matrix != 0)
            Bc = (h[:, :, newaxis] * h.conjugate().T[newaxis, :, :]).transpose(2, 0, 1)
-            Ac = Bc.reshape(nc * nc,
+            Ac = Bc.reshape(nc * nc, num_points)

            # get indices for upper triangular matrices (use tril b/c transposed)
            ind = reshape(tril(ones((nc, nc))), (nc * nc,)) > 0
@@ -1855,24 +1799,25 @@ class BeamformerCMF(BeamformerBase):
            R = realify(reshape(csm.T, (nc * nc, 1))[ind, :])[ind_reim, :] * unit
            # choose method
            if self.method == 'LassoLars':
-                model = LassoLars(alpha=self.alpha * unit, max_iter=self.
+                model = LassoLars(alpha=self.alpha * unit, max_iter=self.n_iter, positive=True, **sklearn_ndict)
            elif self.method == 'LassoLarsBIC':
-                model = LassoLarsIC(criterion='bic', max_iter=self.
+                model = LassoLarsIC(criterion='bic', max_iter=self.n_iter, positive=True, **sklearn_ndict)
            elif self.method == 'OMPCV':
                model = OrthogonalMatchingPursuitCV(**sklearn_ndict)
            elif self.method == 'NNLS':
                model = LinearRegression(positive=True)

            if self.method == 'Split_Bregman' and config.have_pylops:
-                from pylops import Identity, MatrixMult
-
-
-
-                self.
-
-
-
-
+                from pylops import Identity, MatrixMult
+                from pylops.optimization.sparsity import splitbregman
+
+                Oop = MatrixMult(A)  # transfer operator
+                Iop = self.alpha * Identity(num_points)  # regularisation
+                self._ac[i], iterations, cost = splitbregman(
+                    Op=Oop,
+                    RegsL1=[Iop],
+                    y=R[:, 0],
+                    niter_outer=self.n_iter,
                    niter_inner=5,
                    RegsL2=None,
                    dataregsL2=None,
@@ -1885,17 +1830,16 @@ class BeamformerCMF(BeamformerBase):
                self._ac[i] /= unit

            elif self.method == 'FISTA' and config.have_pylops:
-                from pylops import
+                from pylops import MatrixMult
+                from pylops.optimization.sparsity import fista

-                Oop = MatrixMult(A)  #
-                self._ac[i], iterations =
+                Oop = MatrixMult(A)  # transfer operator
+                self._ac[i], iterations, cost = fista(
                    Op=Oop,
-
-                    niter=self.
+                    y=R[:, 0],
+                    niter=self.n_iter,
                    eps=self.alpha,
                    alpha=None,
-                    eigsiter=None,
-                    eigstol=0,
                    tol=1e-10,
                    show=self.show,
                )
@@ -1910,9 +1854,9 @@ class BeamformerCMF(BeamformerBase):
                return func[0].T, der[:, 0]

            # initial guess
-            x0 = ones([
-            #
-
+            x0 = ones([num_points])
+            # boundaries - set to non negative
+            boundaries = tile((0, +inf), (len(x0), 1))

            # optimize
            self._ac[i], yval, dicts = fmin_l_bfgs_b(
@@ -1921,14 +1865,14 @@ class BeamformerCMF(BeamformerBase):
                fprime=None,
                args=(),
                approx_grad=0,
-                bounds=
+                bounds=boundaries,
                m=10,
                factr=10000000.0,
                pgtol=1e-05,
                epsilon=1e-08,
                iprint=-1,
                maxfun=15000,
-                maxiter=self.
+                maxiter=self.n_iter,
                disp=None,
                callback=None,
                maxls=20,
@@ -1936,11 +1880,12 @@ class BeamformerCMF(BeamformerBase):

            self._ac[i] /= unit
        else:
-            # from sklearn 1.2, normalize=True does not work the same way anymore and the
-            # approach with StandardScaler does scale in a different way, thus we
-            # code and normalize ourselves to make results the same over
+            # from sklearn 1.2, normalize=True does not work the same way anymore and the
+            # pipeline approach with StandardScaler does scale in a different way, thus we
+            # monkeypatch the code and normalize ourselves to make results the same over
+            # different sklearn versions
            norms = norm(A, axis=0)
-            # get rid of
+            # get rid of sklearn warnings that appear for sklearn<1.2 despite any settings
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', category=FutureWarning)
                # normalized A
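For `BeamformerCMF` the solver selection is unchanged; only the iteration trait is renamed and the pylops calls move to the function-based `splitbregman`/`fista` API. A hedged construction sketch continuing `ps` and `st` from above; the alpha value follows the docstring's 1e-9 recommendation.

    import acoular as ac

    bc = ac.BeamformerCMF(freq_data=ps, steer=st, method='LassoLars', alpha=1e-9, n_iter=500)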
@@ -1950,24 +1895,22 @@ class BeamformerCMF(BeamformerBase):
|
|
|
1950
1895
|
self._fr[i] = 1
|
|
1951
1896
|
|
|
1952
1897
|
|
|
1898
|
+
@deprecated_alias({'max_iter': 'n_iter'})
|
|
1953
1899
|
class BeamformerSODIX(BeamformerBase):
|
|
1954
|
-
"""
|
|
1955
|
-
Schallabstrahlung von Turbofantriebwerken, 2017. and
|
|
1956
|
-
Oertwig, Advancements in the source localization method SODIX and
|
|
1957
|
-
application to short cowl engine data, 2019.
|
|
1900
|
+
"""Source directivity modeling in the cross-spectral matrix (SODIX) algorithm.
|
|
1958
1901
|
|
|
1959
|
-
|
|
1902
|
+
See :cite:`Funke2017` and :cite:`Oertwig2019` for details.
|
|
1960
1903
|
"""
|
|
1961
1904
|
|
|
1962
1905
|
#: Type of fit method to be used ('fmin_l_bfgs_b').
|
|
1963
1906
|
#: These methods are implemented in
|
|
1964
1907
|
#: the scipy module.
|
|
1965
|
-
method =
|
|
1908
|
+
method = Enum('fmin_l_bfgs_b', desc='fit method used')
|
|
1966
1909
|
|
|
1967
1910
|
#: Maximum number of iterations,
|
|
1968
1911
|
#: tradeoff between speed and precision;
|
|
1969
1912
|
#: defaults to 200
|
|
1970
|
-
|
|
1913
|
+
n_iter = Int(200, desc='maximum number of iterations')
|
|
1971
1914
|
|
|
1972
1915
|
#: Weight factor for regularization,
|
|
1973
1916
|
#: defaults to 0.0.
|
|
@@ -1991,7 +1934,7 @@ class BeamformerSODIX(BeamformerBase):
 'freq_data.digest',
 'alpha',
 'method',
-'
+'n_iter',
 'unit_mult',
 'r_diag',
 'precision',
@@ -2023,7 +1966,7 @@ class BeamformerSODIX(BeamformerBase):
 """
 # prepare calculation
 f = self._f
-
+num_points = self.steer.grid.size
 # unit = self.unit_mult
 num_mics = self.steer.mics.num_mics
 # SODIX needs special treatment as the result from one frequency is used to
@@ -2044,18 +1987,18 @@ class BeamformerSODIX(BeamformerBase):
 """Parameters
 ----------
 directions
-[
+[num_points*num_mics]
 
 Returns
 -------
 func - Sodix function to optimize
 [1]
 derdrl - derivitaives in direction of D
-[num_mics*
+[num_mics*num_points].
 
 """
 #### the sodix function ####
-Djm = directions.reshape([
+Djm = directions.reshape([num_points, num_mics])
 p = h.T * Djm
 csm_mod = dot(p.T, p.conj())
 Q = csm - csm_mod
@@ -2073,33 +2016,33 @@ class BeamformerSODIX(BeamformerBase):
 
 ##### initial guess ####
 if not self._fr[(i - 1)]:
-D0 = ones([
+D0 = ones([num_points, num_mics])
 else:
 D0 = sqrt(
 self._ac[(i - 1)]
 * real(trace(csm) / trace(array(self.freq_data.csm[i - 1], dtype='complex128', copy=1))),
 )
 
-#
-
+# boundaries - set to non negative [2*(num_points*num_mics)]
+boundaries = tile((0, +inf), (num_points * num_mics, 1))
 
 # optimize with gradient solver
 # see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
 
-qi = ones([
+qi = ones([num_points, num_mics])
 qi, yval, dicts = fmin_l_bfgs_b(
 function,
 D0,
 fprime=None,
 args=(),
 approx_grad=0,
-bounds=
+bounds=boundaries,
 factr=100.0,
 pgtol=1e-12,
 epsilon=1e-08,
 iprint=-1,
 maxfun=1500000,
-maxiter=self.
+maxiter=self.n_iter,
 disp=-1,
 callback=None,
 maxls=20,
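The hunk above contains the core of the SODIX objective: directivity values reshaped to `[num_points, num_mics]`, multiplied element-wise with the transposed transfer functions, and compared against the measured cross-spectral matrix. A standalone numpy sketch of that model; shapes and variable values are illustrative only.

# Sketch: SODIX-style modeled CSM from per-microphone directivity factors.
from numpy import absolute, dot
from numpy.random import default_rng

rng = default_rng(2)
num_points, num_mics = 5, 8
# stand-in transfer functions (num_mics x num_points) and measured CSM
h = rng.standard_normal((num_mics, num_points)) + 1j * rng.standard_normal((num_mics, num_points))
csm = rng.standard_normal((num_mics, num_mics)) + 1j * rng.standard_normal((num_mics, num_mics))

directions = rng.random(num_points * num_mics)        # flat optimization variable
Djm = directions.reshape([num_points, num_mics])      # directivity per point and mic
p = h.T * Djm                                         # weighted transfer, [num_points, num_mics]
csm_mod = dot(p.T, p.conj())                          # modeled CSM, [num_mics, num_mics]
Q = csm - csm_mod                                     # residual driven to zero by the fit
cost = (absolute(Q) ** 2).sum()                       # scalar least-squares objective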
@@ -2111,8 +2054,12 @@ class BeamformerSODIX(BeamformerBase):
 self._fr[i] = 1
 
 
+@deprecated_alias({'max_iter': 'n_iter'})
 class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
-"""Beamforming GIB methods with different normalizations
+"""Beamforming GIB methods with different normalizations.
+
+See :cite:`Suzuki2011` for details.
+"""
 
 #: Unit multiplier for evaluating, e.g., nPa instead of Pa.
 #: Values are converted back before returning.
@@ -2120,17 +2067,18 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
 #: within fitting method algorithms. Defaults to 1e9.
 unit_mult = Float(1e9, desc='unit multiplier')
 
-#:
+#: Total or maximum number of iterations
+#: (depending on :attr:`method`),
 #: tradeoff between speed and precision;
 #: defaults to 10
-
+n_iter = Int(10, desc='maximum number of iterations')
 
 #: Type of fit method to be used ('Suzuki', 'LassoLars', 'LassoLarsCV', 'LassoLarsBIC',
 #: 'OMPCV' or 'NNLS', defaults to 'Suzuki').
 #: These methods are implemented in
 #: the `scikit-learn <http://scikit-learn.org/stable/user_guide.html>`_
 #: module.
-method =
+method = Enum(
 'Suzuki',
 'InverseIRLS',
 'LassoLars',
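`method` is now declared with `Enum` instead of the deprecated `Trait` factory, so the first listed value ('Suzuki' here, 'fmin_l_bfgs_b' for SODIX) is the default and any other assignment must match one of the listed strings. A small traits sketch with a hypothetical class name:

# Sketch: Enum trait with the first value as default and validation on assignment.
from traits.api import Enum, HasStrictTraits, Int

class FitSettings(HasStrictTraits):            # hypothetical demo class
    method = Enum('Suzuki', 'InverseIRLS', 'LassoLars', desc='fit method used')
    n_iter = Int(10, desc='maximum number of iterations')

s = FitSettings()
print(s.method)          # 'Suzuki' (first Enum value is the default)
s.method = 'LassoLars'   # allowed
# s.method = 'foo'       # would raise a TraitError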
@@ -2176,7 +2124,7 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
 'precision',
 'alpha',
 'method',
-'
+'n_iter',
 'unit_mult',
 'eps_perc',
 'pnorm',
@@ -2219,13 +2167,13 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
 f = self._f
 n = int(self.na)  # number of eigenvalues
 m = int(self.m)  # number of first eigenvalue
-
-
-hh = zeros((1,
+num_channels = self.freq_data.num_channels  # number of channels
+num_points = self.steer.grid.size
+hh = zeros((1, num_points, num_channels), dtype='D')
 
 # Generate a cross spectral matrix, and perform the eigenvalue decomposition
 for i in ind:
-# for monopole and source
+# for monopole and source strength Q needs to define density
 # calculate a transfer matrix A
 hh = self.steer.transfer(f[i])
 A = hh.T
@@ -2234,33 +2182,34 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
 eva, eve = eigh(csm)
 eva = eva[::-1]
 eve = eve[:, ::-1]
-
+# set small values zo 0, lowers numerical errors in simulated data
+eva[eva < max(eva) / 1e12] = 0
 # init sources
-qi = zeros([n + m,
-# Select the number of coherent modes to be processed referring to the eigenvalue
-#
+qi = zeros([n + m, num_points], dtype='complex128')
+# Select the number of coherent modes to be processed referring to the eigenvalue
+# distribution.
 for s in list(range(m, n + m)):
 if eva[s] > 0:
 # Generate the corresponding eigenmodes
 emode = array(sqrt(eva[s]) * eve[:, s], dtype='complex128')
 # choose method for computation
 if self.method == 'Suzuki':
-leftpoints =
-locpoints = arange(
-weights = diag(ones(
-epsilon = arange(self.
-for it in arange(self.
-if
+leftpoints = num_points
+locpoints = arange(num_points)
+weights = diag(ones(num_points))
+epsilon = arange(self.n_iter)
+for it in arange(self.n_iter):
+if num_channels <= leftpoints:
 AWA = dot(dot(A[:, locpoints], weights), A[:, locpoints].conj().T)
 epsilon[it] = max(absolute(eigvals(AWA))) * self.eps_perc
 qi[s, locpoints] = dot(
 dot(
 dot(weights, A[:, locpoints].conj().T),
-inv(AWA + eye(
+inv(AWA + eye(num_channels) * epsilon[it]),
 ),
 emode,
 )
-elif
+elif num_channels > leftpoints:
 AA = dot(A[:, locpoints].conj().T, A[:, locpoints])
 epsilon[it] = max(absolute(eigvals(AA))) * self.eps_perc
 qi[s, locpoints] = dot(
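In the 'Suzuki' branch above, each eigenmode is mapped to source strengths through a weighted, regularized pseudo-inverse, with the regularization set to a fraction (`eps_perc`) of the largest eigenvalue of A W A^H. A compact numpy sketch of one such update for the underdetermined case, using synthetic arrays:

# Sketch: one regularized inversion step q = W A^H (A W A^H + eps*I)^-1 e
from numpy import absolute, diag, dot, eye, ones
from numpy.random import default_rng
from scipy.linalg import eigvals, inv

rng = default_rng(3)
num_channels, num_points = 8, 32            # underdetermined: fewer channels than points
A = rng.standard_normal((num_channels, num_points)) + 1j * rng.standard_normal((num_channels, num_points))
emode = rng.standard_normal(num_channels) + 1j * rng.standard_normal(num_channels)

weights = diag(ones(num_points))            # start with uniform weights
eps_perc = 0.05
AWA = dot(dot(A, weights), A.conj().T)
eps = max(absolute(eigvals(AWA))) * eps_perc
q = dot(dot(dot(weights, A.conj().T), inv(AWA + eye(num_channels) * eps)), emode)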
@@ -2268,8 +2217,10 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
 emode,
 )
 if self.beta < 1 and it > 1:
-# Reorder from the greatest to smallest magnitude to define a
-
+# Reorder from the greatest to smallest magnitude to define a
+# reduced-point source distribution, and reform a reduced transfer
+# matrix
+leftpoints = int(round(num_points * self.beta ** (it + 1)))
 idx = argsort(abs(qi[s, locpoints]))[::-1]
 # print(it, leftpoints, locpoints, idx )
 locpoints = delete(locpoints, [idx[leftpoints::]])
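When `beta < 1`, the candidate point set is shrunk each iteration to the strongest `round(num_points * beta**(it + 1))` sources, which implicitly reduces the transfer matrix to those columns. A short sketch of that pruning step with synthetic magnitudes:

# Sketch: prune candidate points to the strongest round(num_points * beta**(it+1)).
from numpy import arange, argsort, delete
from numpy.random import default_rng

rng = default_rng(4)
num_points, beta, it = 20, 0.8, 2
q = rng.standard_normal(num_points)               # stand-in source strengths

locpoints = arange(num_points)
leftpoints = int(round(num_points * beta ** (it + 1)))
idx = argsort(abs(q[locpoints]))[::-1]            # strongest first
locpoints = delete(locpoints, idx[leftpoints:])   # keep only the strongest positions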
@@ -2281,33 +2232,33 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
 weights = diag(absolute(qi[s, :]) ** (2 - self.pnorm))
 
 elif self.method == 'InverseIRLS':
-weights = eye(
-locpoints = arange(
-for _it in arange(self.
-if
+weights = eye(num_points)
+locpoints = arange(num_points)
+for _it in arange(self.n_iter):
+if num_channels <= num_points:
 wtwi = inv(dot(weights.T, weights))
 aH = A.conj().T
 qi[s, :] = dot(dot(wtwi, aH), dot(inv(dot(A, dot(wtwi, aH))), emode))
 weights = diag(absolute(qi[s, :]) ** ((2 - self.pnorm) / 2))
 weights = weights / sum(absolute(weights))
-elif
+elif num_channels > num_points:
 wtw = dot(weights.T, weights)
 qi[s, :] = dot(dot(inv(dot(dot(A.conj.T, wtw), A)), dot(A.conj().T, wtw)), emode)
 weights = diag(absolute(qi[s, :]) ** ((2 - self.pnorm) / 2))
 weights = weights / sum(absolute(weights))
 else:
-locpoints = arange(
+locpoints = arange(num_points)
 unit = self.unit_mult
 AB = vstack([hstack([A.real, -A.imag]), hstack([A.imag, A.real])])
 R = hstack([emode.real.T, emode.imag.T]) * unit
 if self.method == 'LassoLars':
-model = LassoLars(alpha=self.alpha * unit, max_iter=self.
+model = LassoLars(alpha=self.alpha * unit, max_iter=self.n_iter, positive=True)
 elif self.method == 'LassoLarsBIC':
-model = LassoLarsIC(criterion='bic', max_iter=self.
+model = LassoLarsIC(criterion='bic', max_iter=self.n_iter, positive=True)
 elif self.method == 'OMPCV':
 model = OrthogonalMatchingPursuitCV()
 elif self.method == 'LassoLarsCV':
-model = LassoLarsCV()
+model = LassoLarsCV(max_iter=self.n_iter, positive=True)
 elif self.method == 'NNLS':
 model = LinearRegression(positive=True)
 model.normalize = False
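For the scikit-learn based methods, the complex system A q ≈ e is rewritten as a purely real least-squares problem by stacking real and imaginary parts into the block matrix built by the unchanged `vstack`/`hstack` lines above. A standalone sketch of the same transformation, checked against a known solution:

# Sketch: turn a complex linear system into a real one for real-valued sparse solvers.
from numpy import allclose, dot, hstack, vstack
from numpy.linalg import lstsq
from numpy.random import default_rng

rng = default_rng(5)
A = rng.standard_normal((6, 4)) + 1j * rng.standard_normal((6, 4))
q = rng.standard_normal(4) + 1j * rng.standard_normal(4)
e = dot(A, q)

AB = vstack([hstack([A.real, -A.imag]), hstack([A.imag, A.real])])   # real block form
R = hstack([e.real, e.imag])
x, *_ = lstsq(AB, R, rcond=None)                 # stacked [Re(q), Im(q)]
q_rec = x[:4] + 1j * x[4:]
assert allclose(q_rec, q)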
@@ -2334,15 +2285,16 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
 Warning,
 stacklevel=2,
 )
-# Generate source maps of all selected eigenmodes, and superpose source intensity
-
+# Generate source maps of all selected eigenmodes, and superpose source intensity
+# for each source type.
+temp = zeros(num_points)
 temp[locpoints] = sum(absolute(qi[:, locpoints]) ** 2, axis=0)
 self._ac[i] = temp
 self._fr[i] = 1
 
 
 class BeamformerAdaptiveGrid(BeamformerBase, Grid):
-"""
+"""Abstract base class for array methods without predefined grid."""
 
 # the grid positions live in a shadow trait
 _gpos = Any
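The final map superposes the squared magnitudes of the per-eigenmode source strengths over all processed modes, which is what the `temp[locpoints] = sum(...)` line does. In isolation, with synthetic values:

# Sketch: superpose |q|^2 over eigenmodes to form the source map.
from numpy import absolute, arange, zeros
from numpy.random import default_rng

rng = default_rng(6)
n_modes, num_points = 3, 10
qi = rng.standard_normal((n_modes, num_points)) + 1j * rng.standard_normal((n_modes, num_points))
locpoints = arange(num_points)                   # all points kept in this toy case

temp = zeros(num_points)
temp[locpoints] = (absolute(qi[:, locpoints]) ** 2).sum(axis=0)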
@@ -2350,7 +2302,7 @@ class BeamformerAdaptiveGrid(BeamformerBase, Grid):
 def _get_shape(self):
 return (self.size,)
 
-def
+def _get_pos(self):
 return self._gpos
 
 def integrate(self, sector):
@@ -2385,7 +2337,10 @@ class BeamformerAdaptiveGrid(BeamformerBase, Grid):
 
 
 class BeamformerGridlessOrth(BeamformerAdaptiveGrid):
-"""Orthogonal beamforming without predefined grid.
+"""Orthogonal beamforming without predefined grid.
+
+See :cite:`Sarradj2022` for details.
+"""
 
 #: List of components to consider, use this to directly set the eigenvalues
 #: used in the beamformer. Alternatively, set :attr:`n`.
@@ -2398,7 +2353,7 @@ class BeamformerGridlessOrth(BeamformerAdaptiveGrid):
 n = Int(1)
 
 #: Geometrical bounds of the search domain to consider.
-#: :attr:`bound`
+#: :attr:`bound` is a list that contains exactly three tuple of
 #: (min,max) for each of the coordinates x, y, z.
 #: Defaults to [(-1.,1.),(-1.,1.),(0.01,1.)]
 bounds = List(Tuple(Float, Float), minlen=3, maxlen=3, value=[(-1.0, 1.0), (-1.0, 1.0), (0.01, 1.0)])
@@ -2419,7 +2374,7 @@ class BeamformerGridlessOrth(BeamformerAdaptiveGrid):
 
 # internal identifier
 digest = Property(
-depends_on=['freq_data.digest', '
+depends_on=['freq_data.digest', 'steer.digest', 'precision', 'r_diag', 'eva_list', 'bounds', 'shgo'],
 )
 
 @cached_property
@@ -2455,14 +2410,14 @@ class BeamformerGridlessOrth(BeamformerAdaptiveGrid):
 """
 f = self._f
 normfactor = self.sig_loss_norm()
-
+num_channels = self.freq_data.num_channels
 # eigenvalue number list in standard form from largest to smallest
 eva_list = unique(self.eva_list % self.steer.mics.num_mics)[::-1]
 steer_type = self.steer.steer_type
 if steer_type == 'custom':
 msg = 'custom steer_type is not implemented'
 raise NotImplementedError(msg)
-mpos = self.steer.mics.
+mpos = self.steer.mics.pos
 env = self.steer.env
 shgo_opts = {
 'n': 256,
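`BeamformerGridlessOrth` searches source positions by optimizing directly over the bounded region with `scipy.optimize.shgo` rather than evaluating a fixed grid. A hedged sketch of that pattern over a box of three `(min, max)` bounds; the objective below is a made-up stand-in for the (negated) beamformer output:

# Sketch: global search over a 3-D box with shgo, as used for gridless localization.
from numpy import array
from scipy.linalg import norm
from scipy.optimize import shgo

bounds = [(-1.0, 1.0), (-1.0, 1.0), (0.01, 1.0)]    # (min, max) per coordinate x, y, z
true_pos = array([0.2, -0.3, 0.5])                  # hypothetical source position

def neg_objective(x):
    # stand-in for the negated beamformer output at position x
    return norm(x - true_pos)

res = shgo(neg_objective, bounds, n=256, sampling_method='sobol')
print(res.x)   # should land close to true_pos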
@@ -2506,8 +2461,8 @@ class BeamformerGridlessOrth(BeamformerAdaptiveGrid):
 # store result for position
 self._gpos[:, i1] = oR['x']
 # store result for level
-self._ac[i, i1] = eva[n] /
-# print(oR['x'],eva[n]/
+self._ac[i, i1] = eva[n] / num_channels
+# print(oR['x'],eva[n]/num_channels,oR)
 self._fr[i] = 1
 
 