acoular-24.10-py3-none-any.whl → acoular-25.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- acoular/__init__.py +5 -2
- acoular/aiaa/__init__.py +12 -0
- acoular/{tools → aiaa}/aiaa.py +23 -28
- acoular/base.py +75 -55
- acoular/calib.py +129 -34
- acoular/configuration.py +11 -9
- acoular/demo/__init__.py +1 -0
- acoular/demo/acoular_demo.py +31 -18
- acoular/deprecation.py +85 -0
- acoular/environments.py +481 -229
- acoular/fastFuncs.py +90 -84
- acoular/fbeamform.py +203 -411
- acoular/fprocess.py +233 -123
- acoular/grids.py +793 -424
- acoular/h5cache.py +29 -40
- acoular/h5files.py +2 -6
- acoular/microphones.py +197 -74
- acoular/process.py +660 -149
- acoular/sdinput.py +23 -20
- acoular/signals.py +461 -159
- acoular/sources.py +1311 -489
- acoular/spectra.py +328 -352
- acoular/tbeamform.py +79 -202
- acoular/tfastfuncs.py +21 -21
- acoular/tools/__init__.py +2 -8
- acoular/tools/helpers.py +216 -2
- acoular/tools/metrics.py +4 -4
- acoular/tools/utils.py +106 -200
- acoular/tprocess.py +348 -309
- acoular/traitsviews.py +10 -10
- acoular/trajectory.py +126 -53
- acoular/version.py +2 -2
- {acoular-24.10.dist-info → acoular-25.3.dist-info}/METADATA +39 -17
- acoular-25.3.dist-info/RECORD +56 -0
- {acoular-24.10.dist-info → acoular-25.3.dist-info}/WHEEL +1 -1
- acoular-24.10.dist-info/RECORD +0 -54
- {acoular-24.10.dist-info → acoular-25.3.dist-info}/licenses/AUTHORS.rst +0 -0
- {acoular-24.10.dist-info → acoular-25.3.dist-info}/licenses/LICENSE +0 -0
acoular/fbeamform.py
CHANGED
@@ -67,12 +67,12 @@ from numpy import (
     pi,
     real,
     reshape,
-    round,
+    round,  # noqa: A004
     searchsorted,
     sign,
     size,
     sqrt,
-    sum,
+    sum,  # noqa: A004
     tile,
     trace,
     tril,
@@ -92,13 +92,12 @@ from traits.api import (
     Dict,
     Enum,
     Float,
-    HasPrivateTraits,
+    HasStrictTraits,
     Instance,
     Int,
     List,
     Property,
     Range,
-    Trait,
     Tuple,
     cached_property,
     on_trait_change,
@@ -106,7 +105,9 @@ from traits.api import (
 )
 from traits.trait_errors import TraitError

+# acoular imports
 from .configuration import config
+from .deprecation import deprecated_alias
 from .environments import Environment
 from .fastFuncs import beamformerFreq, calcPointSpreadFunction, calcTransfer, damasSolverGaussSeidel
 from .grids import Grid, Sector
@@ -119,25 +120,25 @@ from .tfastfuncs import _steer_I, _steer_II, _steer_III, _steer_IV

 sklearn_ndict = {}
 if parse(sklearn.__version__) < parse('1.4'):
-    sklearn_ndict['normalize'] = False
+    sklearn_ndict['normalize'] = False  # pragma: no cover

-BEAMFORMER_BASE_DIGEST_DEPENDENCIES = ['freq_data.digest', 'r_diag', 'r_diag_norm', 'precision', '
+BEAMFORMER_BASE_DIGEST_DEPENDENCIES = ['freq_data.digest', 'r_diag', 'r_diag_norm', 'precision', 'steer.digest']


-class SteeringVector(
+class SteeringVector(HasStrictTraits):
     """Basic class for implementing steering vectors with monopole source transfer models.

     Handles four different steering vector formulations. See :cite:`Sarradj2012` for details.
     """

     #: :class:`~acoular.grids.Grid`-derived object that provides the grid locations.
-    grid =
+    grid = Instance(Grid, desc='beamforming grid')

     #: :class:`~acoular.microphones.MicGeom` object that provides the microphone locations.
-    mics =
+    mics = Instance(MicGeom, desc='microphone geometry')

     #: Type of steering vectors, see also :cite:`Sarradj2012`. Defaults to 'true level'.
-    steer_type =
+    steer_type = Enum('true level', 'true location', 'classic', 'inverse', desc='type of steering vectors used')

     #: :class:`~acoular.environments.Environment` or derived object,
     #: which provides information about the sound propagation in the medium.
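The `grid`, `mics` and `steer_type` traits above are now explicit `Instance`/`Enum` traits on a `HasStrictTraits` class, so misspelled trait names raise instead of being silently accepted. A minimal usage sketch, assuming the usual acoular helper classes; the geometry file name, grid extents and file-loading trait names are placeholders, not taken from this diff:

```python
import acoular as ac

mics = ac.MicGeom(file='array_64.xml')   # placeholder geometry file (trait name assumed)
grid = ac.RectGrid(x_min=-0.2, x_max=0.2, y_min=-0.2, y_max=0.2, z=0.3, increment=0.01)
env = ac.Environment(c=343.0)            # speed of sound in m/s

# steer_type must be one of the Enum values introduced above
st = ac.SteeringVector(grid=grid, mics=mics, env=env, steer_type='true level')
```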
@@ -202,17 +203,17 @@ class SteeringVector(HasPrivateTraits):
     # internal identifier, use for inverse methods, excluding steering vector type
     inv_digest = Property(depends_on=['env.digest', 'grid.digest', 'mics.digest', '_ref'])

-    @property_depends_on('grid.digest, env.digest, _ref')
+    @property_depends_on(['grid.digest', 'env.digest', '_ref'])
     def _get_r0(self):
         if isscalar(self.ref):
             if self.ref > 0:
                 return full((self.grid.size,), self.ref)
             return self.env._r(self.grid.pos())
-        return self.env._r(self.grid.
+        return self.env._r(self.grid.pos, self.ref[:, newaxis])

-    @property_depends_on('grid.digest, mics.digest, env.digest')
+    @property_depends_on(['grid.digest', 'mics.digest', 'env.digest'])
     def _get_rm(self):
-        return atleast_2d(self.env._r(self.grid.
+        return atleast_2d(self.env._r(self.grid.pos, self.mics.pos))

     @cached_property
     def _get_digest(self):
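The decorator arguments change here from a single comma-separated string to an explicit list of trait names. A small self-contained sketch of the pattern, assuming `property_depends_on` is importable from `traits.api` as it is in this module:

```python
from traits.api import Float, HasTraits, Property, property_depends_on


class Circle(HasTraits):
    radius = Float(1.0)
    #: cached property, recomputed only when `radius` changes
    area = Property()

    @property_depends_on(['radius'])  # list form, as now used in fbeamform.py
    def _get_area(self):
        return 3.141592653589793 * self.radius**2


c = Circle(radius=2.0)
print(c.area)    # 12.566...
c.radius = 3.0
print(c.area)    # recomputed because the dependency changed
```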
@@ -304,136 +305,17 @@ class LazyBfResult:
         return self.bf._ac.__getitem__(key)


-class BeamformerBase(
+class BeamformerBase(HasStrictTraits):
     """Beamforming using the basic delay-and-sum algorithm in the frequency domain."""

     # Instance of :class:`~acoular.fbeamform.SteeringVector` or its derived classes
     # that contains information about the steering vector. This is a private trait.
     # Do not set this directly, use `steer` trait instead.
-
-
-    #: :class:`~acoular.fbeamform.SteeringVector` or derived object.
-    #: Defaults to :class:`~acoular.fbeamform.SteeringVector` object.
-    steer = Property(desc='steering vector object')
-
-    def _get_steer(self):
-        return self._steer_obj
-
-    def _set_steer(self, steer):
-        if isinstance(steer, SteeringVector):
-            self._steer_obj = steer
-        elif steer in ('true level', 'true location', 'classic', 'inverse'):
-            # Type of steering vectors, see also :cite:`Sarradj2012`.
-            msg = (
-                "Deprecated use of 'steer' trait. Please use the 'steer' with an object of class "
-                "'SteeringVector'. Using a string to specify the steer type will be removed in "
-                'version 25.01.'
-            )
-            warn(
-                msg,
-                DeprecationWarning,
-                stacklevel=2,
-            )
-            self._steer_obj.steer_type = steer
-        else:
-            raise (TraitError(args=self, name='steer', info='SteeringVector', value=steer))
-
-    # --- List of backwards compatibility traits and their setters/getters -----------
-
-    # :class:`~acoular.environments.Environment` or derived object.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    env = Property()
-
-    def _get_env(self):
-        return self._steer_obj.env
-
-    def _set_env(self, env):
-        msg = (
-            "Deprecated use of 'env' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'env' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        self._steer_obj.env = env
-
-    # The speed of sound.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    c = Property()
-
-    def _get_c(self):
-        return self._steer_obj.env.c
-
-    def _set_c(self, c):
-        msg = (
-            "Deprecated use of 'c' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector' that holds an 'Environment' instance."
-            "The 'c' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        self._steer_obj.env.c = c
-
-    # :class:`~acoular.grids.Grid`-derived object that provides the grid locations.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    grid = Property()
-
-    def _get_grid(self):
-        return self._steer_obj.grid
-
-    def _set_grid(self, grid):
-        msg = (
-            "Deprecated use of 'grid' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'grid' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        self._steer_obj.grid = grid
-
-    # :class:`~acoular.microphones.MicGeom` object that provides the microphone locations.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait
-    mpos = Property()
-
-    def _get_mpos(self):
-        return self._steer_obj.mics
-
-    def _set_mpos(self, mpos):
-        msg = (
-            "Deprecated use of 'mpos' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'mpos' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        self._steer_obj.mics = mpos
-
-    # Sound travel distances from microphone array center to grid points (r0)
-    # and all array mics to grid points (rm). Readonly.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait
-    r0 = Property()
-
-    def _get_r0(self):
-        msg = (
-            "Deprecated use of 'r0' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'r0' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        return self._steer_obj.r0
-
-    rm = Property()
-
-    def _get_rm(self):
-        msg = (
-            "Deprecated use of 'rm' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'rm' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        return self._steer_obj.rm
-
-    # --- End of backwards compatibility traits --------------------------------------
+    steer = Instance(SteeringVector, args=())

     #: :class:`~acoular.spectra.PowerSpectra` object that provides the
     #: cross spectral matrix and eigenvalues
-    freq_data =
+    freq_data = Instance(PowerSpectra, desc='freq data object')

     #: Boolean flag, if 'True' (default), the main diagonal is removed before beamforming.
     r_diag = Bool(True, desc='removal of diagonal')
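With the backwards-compatibility properties (`env`, `c`, `grid`, `mpos`, `r0`, `rm`) and the string form of `steer` removed, all geometry and environment information is supplied through a `SteeringVector` instance. A migration sketch; the trait names used for loading data (`TimeSamples.file`, `PowerSpectra.source`, `MicGeom.file`) and the file names are assumptions, not shown in this diff:

```python
import acoular as ac

ts = ac.TimeSamples(file='measurement.h5')    # placeholder time data file (trait name assumed)
ps = ac.PowerSpectra(source=ts, block_size=128)
mics = ac.MicGeom(file='array_64.xml')        # placeholder geometry file
grid = ac.RectGrid(x_min=-0.2, x_max=0.2, y_min=-0.2, y_max=0.2, z=0.3, increment=0.01)

# 24.10 and earlier (now removed):
#   bb = ac.BeamformerBase(freq_data=ps, grid=grid, mpos=mics, c=343.0)
# 25.3:
st = ac.SteeringVector(grid=grid, mics=mics)
bb = ac.BeamformerBase(freq_data=ps, steer=st, r_diag=True)
source_map = bb.synthetic(4000, 3)            # third-octave band map at 4 kHz
```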
@@ -450,7 +332,7 @@ class BeamformerBase(HasPrivateTraits):
     )

     #: Floating point precision of property result. Corresponding to numpy dtypes. Default = 64 Bit.
-    precision =
+    precision = Enum('float64', 'float32', desc='precision (32/64 Bit) of result, corresponding to numpy dtypes')

     #: Boolean flag, if 'True' (default), the result is cached in h5 files.
     cached = Bool(True, desc='cached flag')
@@ -467,6 +349,12 @@ class BeamformerBase(HasPrivateTraits):
     # internal identifier
     digest = Property(depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES)

+    # private traits
+    _ac = Any(desc='beamforming result')
+    _fr = Any(desc='flag for beamforming result at frequency index')
+    _f = CArray(dtype='float64', desc='frequencies')
+    _numfreq = Int(desc='number of frequencies')
+
     @cached_property
     def _get_digest(self):
         return digest(self)
@@ -492,7 +380,7 @@ class BeamformerBase(HasPrivateTraits):
             # print("no data existent for nodename:", nodename)
             if config.global_caching == 'readonly':
                 return (None, None, None)
-            numfreq = self.freq_data.fftfreq().shape[0]
+            numfreq = self.freq_data.fftfreq().shape[0]
             group = self.h5f.create_new_group(nodename)
             self.h5f.create_compressible_array(
                 'freqs',
@@ -522,11 +410,12 @@ class BeamformerBase(HasPrivateTraits):
         return (ac, fr, gpos)

     def _assert_equal_channels(self):
-
-        if
-
+        num_channels = self.freq_data.num_channels
+        if num_channels != self.steer.mics.num_mics or num_channels == 0:
+            msg = f'{num_channels:d} channels do not fit {self.steer.mics.num_mics:d} mics'
+            raise ValueError(msg)

-    @property_depends_on('digest')
+    @property_depends_on(['digest'])
     def _get_result(self):
         """Implements the :attr:`result` getter routine.
         The beamforming result is either loaded or calculated.
@@ -566,7 +455,7 @@ class BeamformerBase(HasPrivateTraits):
         if not self.r_diag:  # Full CSM --> no normalization needed
             normfactor = 1.0
         elif self.r_diag_norm == 0.0:  # Removed diag: standard normalization factor
-            nMics = float(self.freq_data.
+            nMics = float(self.freq_data.num_channels)
             normfactor = nMics / (nMics - 1)
         elif self.r_diag_norm != 0.0:  # Removed diag: user defined normalization factor
             normfactor = self.r_diag_norm
@@ -583,7 +472,7 @@ class BeamformerBase(HasPrivateTraits):
             - Function for frequency-dependent steering vector calculation

         """
-        if type(self.steer)
+        if type(self.steer) is SteeringVector:  # for simple steering vector, use faster method
            param_type = self.steer.steer_type

            def param_steer_func(f):
@@ -671,7 +560,7 @@ class BeamformerBase(HasPrivateTraits):
         ind = searchsorted(freq, f)
         if ind >= len(freq):
             warn(
-                'Queried frequency (
+                f'Queried frequency ({f:g} Hz) not in resolved frequency range. Returning zeros.',
                 Warning,
                 stacklevel=2,
             )
@@ -820,7 +709,8 @@ class BeamformerFunctional(BeamformerBase):
     #: Functional Beamforming is only well defined for full CSM
     r_diag = Enum(False, desc='False, as Functional Beamformer is only well defined for the full CSM')

-    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since Functional
+    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since Functional
+    #: Beamforming is only well defined for full CSM.
     r_diag_norm = Enum(
         1.0,
         desc='No normalization needed. Functional Beamforming is only well defined for full CSM.',
@@ -855,14 +745,15 @@ class BeamformerFunctional(BeamformerBase):
         normfactor = self.sig_loss_norm()
         param_steer_type, steer_vector = self._beamformer_params()
         for i in ind:
-            if self.r_diag:
+            if self.r_diag:  # pragma: no cover
                 # This case is not used at the moment (see Trait r_diag)
                 # It would need some testing as structural changes were not tested...
                 # ==============================================================================
-                # One cannot use spectral decomposition when diagonal of csm is
-                # as the resulting modified eigenvectors are not
-                #
-                #
+                # One cannot use spectral decomposition when diagonal of csm is
+                # removed, as the resulting modified eigenvectors are not
+                # orthogonal to each other anymore. Therefore potentiating
+                # cannot be applied only to the eigenvalues. --> To avoid this
+                # the root of the csm (removed diag) is calculated directly.
                 # WATCH OUT: This doesn't really produce good results.
                 # ==============================================================================
                 csm = self.freq_data.csm[i]
@@ -907,7 +798,8 @@ class BeamformerCapon(BeamformerBase):
     # for Capon beamforming r_diag is set to 'False'.
     r_diag = Enum(False, desc='removal of diagonal')

-    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since Beamformer Capon
+    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since Beamformer Capon
+    #: is only well defined for full CSM.
     r_diag_norm = Enum(
         1.0,
         desc='No normalization. BeamformerCapon is only well defined for full CSM.',
@@ -932,7 +824,7 @@ class BeamformerCapon(BeamformerBase):

         """
         f = self._f
-        nMics = self.freq_data.
+        nMics = self.freq_data.num_channels
         normfactor = self.sig_loss_norm() * nMics**2
         param_steer_type, steer_vector = self._beamformer_params()
         for i in ind:
@@ -949,8 +841,8 @@ class BeamformerEig(BeamformerBase):
     """

     #: Number of component to calculate:
-    #: 0 (smallest) ... :attr:`~acoular.base.SamplesGenerator.
-    #: defaults to -1, i.e.
+    #: 0 (smallest) ... :attr:`~acoular.base.SamplesGenerator.num_channels`-1;
+    #: defaults to -1, i.e. num_channels-1
     n = Int(-1, desc='No. of eigenvalue')

     # Actual component to calculate, internal, readonly.
@@ -963,7 +855,7 @@ class BeamformerEig(BeamformerBase):
     def _get_digest(self):
         return digest(self)

-    @property_depends_on('steer.mics, n')
+    @property_depends_on(['steer.mics', 'n'])
     def _get_na(self):
         na = self.n
         nm = self.steer.mics.num_mics
@@ -1020,7 +912,8 @@ class BeamformerMusic(BeamformerEig):
     # for MUSIC beamforming r_diag is set to 'False'.
     r_diag = Enum(False, desc='removal of diagonal')

-    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since BeamformerMusic
+    #: Normalization factor in case of CSM diagonal removal. Defaults to 1.0 since BeamformerMusic
+    #: is only well defined for full CSM.
     r_diag_norm = Enum(
         1.0,
         desc='No normalization. BeamformerMusic is only well defined for full CSM.',
@@ -1049,7 +942,7 @@ class BeamformerMusic(BeamformerEig):

         """
         f = self._f
-        nMics = self.freq_data.
+        nMics = self.freq_data.num_channels
         n = int(self.steer.mics.num_mics - self.na)
         normfactor = self.sig_loss_norm() * nMics**2
         param_steer_type, steer_vector = self._beamformer_params()
@@ -1067,7 +960,7 @@ class BeamformerMusic(BeamformerEig):
             self._fr[i] = 1


-class PointSpreadFunction(
+class PointSpreadFunction(HasStrictTraits):
     """The point spread function.

     This class provides tools to calculate the PSF depending on the used
@@ -1079,143 +972,30 @@ class PointSpreadFunction(HasPrivateTraits):
     # Instance of :class:`~acoular.fbeamform.SteeringVector` or its derived classes
     # that contains information about the steering vector. This is a private trait.
     # Do not set this directly, use `steer` trait instead.
-
-
-    #: :class:`~acoular.fbeamform.SteeringVector` or derived object.
-    #: Defaults to :class:`~acoular.fbeamform.SteeringVector` object.
-    steer = Property(desc='steering vector object')
-
-    def _get_steer(self):
-        return self._steer_obj
-
-    def _set_steer(self, steer):
-        if isinstance(steer, SteeringVector):
-            self._steer_obj = steer
-        elif steer in ('true level', 'true location', 'classic', 'inverse'):
-            msg = (
-                "Deprecated use of 'steer' trait. Please use object of class 'SteeringVector'."
-                "The functionality of using string values for 'steer' will be removed in version 25.01."
-            )
-            # Type of steering vectors, see also :cite:`Sarradj2012`.
-            warn(
-                msg,
-                Warning,
-                stacklevel=2,
-            )
-            self._steer_obj = SteeringVector(steer_type=steer)
-        else:
-            raise (TraitError(args=self, name='steer', info='SteeringVector', value=steer))
-
-    # --- List of backwards compatibility traits and their setters/getters -----------
-
-    # :class:`~acoular.environments.Environment` or derived object.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    env = Property()
-
-    def _get_env(self):
-        return self._steer_obj.env
-
-    def _set_env(self, env):
-        msg = (
-            "Deprecated use of 'env' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'env' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        self._steer_obj.env = env
-
-    # The speed of sound.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    c = Property()
-
-    def _get_c(self):
-        return self._steer_obj.env.c
-
-    def _set_c(self, c):
-        msg = (
-            "Deprecated use of 'c' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'c' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        self._steer_obj.env.c = c
-
-    # :class:`~acoular.grids.Grid`-derived object that provides the grid locations.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait.
-    grid = Property()
-
-    def _get_grid(self):
-        return self._steer_obj.grid
-
-    def _set_grid(self, grid):
-        msg = (
-            "Deprecated use of 'grid' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'grid' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        self._steer_obj.grid = grid
-
-    # :class:`~acoular.microphones.MicGeom` object that provides the microphone locations.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait
-    mpos = Property()
-
-    def _get_mpos(self):
-        return self._steer_obj.mics
-
-    def _set_mpos(self, mpos):
-        msg = (
-            "Deprecated use of 'mpos' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'mpos' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        self._steer_obj.mics = mpos
-
-    # Sound travel distances from microphone array center to grid points (r0)
-    # and all array mics to grid points (rm). Readonly.
-    # Deprecated! Only kept for backwards compatibility.
-    # Now governed by :attr:`steer` trait
-    r0 = Property()
-
-    def _get_r0(self):
-        msg = (
-            "Deprecated use of 'r0' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'r0' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        return self._steer_obj.r0
-
-    rm = Property()
-
-    def _get_rm(self):
-        msg = (
-            "Deprecated use of 'rm' trait. Please use the 'steer' trait with an object of class"
-            "'SteeringVector'. The 'rm' trait will be removed in version 25.01."
-        )
-        warn(msg, DeprecationWarning, stacklevel=2)
-        return self._steer_obj.rm
-
-    # --- End of backwards compatibility traits --------------------------------------
+    steer = Instance(SteeringVector, args=())

     #: Indices of grid points to calculate the PSF for.
     grid_indices = CArray(
         dtype=int,
         value=array([]),
         desc='indices of grid points for psf',
-    )  # value=array([]), value=self.grid.pos(),
+    )  # value=array([]), value=self.steer.grid.pos(),

     #: Flag that defines how to calculate and store the point spread function
     #: defaults to 'single'.
     #:
-    #: * 'full': Calculate the full PSF (for all grid points) in one go (should be used if the PSF
-    #:
-    #: * '
-    #:
-
+    #: * 'full': Calculate the full PSF (for all grid points) in one go (should be used if the PSF
+    #:   at all grid points is needed, as with :class:`DAMAS<BeamformerDamas>`)
+    #: * 'single': Calculate the PSF for the grid points defined by :attr:`grid_indices`, one by one
+    #:   (useful if not all PSFs are needed, as with :class:`CLEAN<BeamformerClean>`)
+    #: * 'block': Calculate the PSF for the grid points defined by :attr:`grid_indices`, in one go
+    #:   (useful if not all PSFs are needed, as with :class:`CLEAN<BeamformerClean>`)
+    #: * 'readonly': Do not attempt to calculate the PSF since it should already be cached (useful
+    #:   if multiple processes have to access the cache file)
+    calcmode = Enum('single', 'block', 'full', 'readonly', desc='mode of calculation / storage')

     #: Floating point precision of property psf. Corresponding to numpy dtypes. Default = 64 Bit.
-    precision =
+    precision = Enum('float64', 'float32', desc='precision (32/64 Bit) of result, corresponding to numpy dtypes')

     #: The actual point spread function.
     psf = Property(desc='point spread function')
@@ -1227,7 +1007,7 @@ class PointSpreadFunction(HasPrivateTraits):
     h5f = Instance(H5CacheFileBase, transient=True)

     # internal identifier
-    digest = Property(depends_on=['
+    digest = Property(depends_on=['steer.digest', 'precision'], cached=True)

     @cached_property
     def _get_digest(self):
@@ -1239,7 +1019,7 @@ class PointSpreadFunction(HasPrivateTraits):
         exist and global caching mode is 'readonly'.
         """
         filename = 'psf' + self.digest
-        nodename = ('Hz_
+        nodename = (f'Hz_{self.freq:.2f}').replace('.', '_')
         # print("get cachefile:", filename)
         H5cache.get_cache_file(self, filename)
         if not self.h5f:  # only happens in case of global caching readonly
@@ -1336,25 +1116,25 @@ class PointSpreadFunction(HasPrivateTraits):
         Parameters
         ----------
         ind : list of int
-            Indices of gridpoints which are assumed to be sources.
-
+            Indices of gridpoints which are assumed to be sources. Normalization factor for the
+            beamforming result (e.g. removal of diag is compensated with this.)

         Returns
         -------
         The psf [1, nGridPoints, len(ind)]
-
         """
-        if type(self.steer)
+        if type(self.steer) is SteeringVector:  # for simple steering vector, use faster method
             result = calcPointSpreadFunction(
                 self.steer.steer_type,
                 self.steer.r0,
                 self.steer.rm,
-                2 * pi * self.freq / self.env.c,
+                2 * pi * self.freq / self.steer.env.c,
                 ind,
                 self.precision,
             )
-        else:
-            # there is a version of this in
+        else:
+            # for arbitrary steering sectors, use general calculation. there is a version of this in
+            # fastFuncs, may be used later after runtime testing and debugging
             product = dot(self.steer.steer_vector(self.freq).conj(), self.steer.transfer(self.freq, ind).T)
             result = (product * product.conj()).real
         return result
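The `calcmode` options documented above ('full', 'single', 'block', 'readonly') control how the point spread function is computed and cached. A short sketch of requesting a PSF, reusing the SteeringVector `st` from the earlier example; the frequency value is arbitrary:

```python
from numpy import array
import acoular as ac

psf = ac.PointSpreadFunction(steer=st, freq=4000.0, calcmode='full')
p_full = psf.psf                          # PSF for all grid points (cached if caching is on)

psf_sel = ac.PointSpreadFunction(steer=st, freq=4000.0, calcmode='single')
psf_sel.grid_indices = array([0, 5, 9])   # only these grid points, computed one by one
p_sel = psf_sel.psf
```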
@@ -1372,10 +1152,10 @@ class BeamformerDamas(BeamformerBase):
     beamformer = Property()

     # private storage of beamformer instance
-    _beamformer =
+    _beamformer = Instance(BeamformerBase)

     #: The floating-number-precision of the PSFs. Default is 64 bit.
-    psf_precision =
+    psf_precision = Enum('float64', 'float32', desc='precision of PSF')

     #: Number of iterations, defaults to 100.
     n_iter = Int(100, desc='number of iterations')
@@ -1385,7 +1165,7 @@ class BeamformerDamas(BeamformerBase):

     #: Flag that defines how to calculate and store the point spread function,
     #: defaults to 'full'. See :attr:`PointSpreadFunction.calcmode` for details.
-    calcmode =
+    calcmode = Enum('full', 'single', 'block', 'readonly', desc='mode of psf calculation / storage')

     # internal identifier
     digest = Property(
@@ -1460,11 +1240,12 @@ class BeamformerDamas(BeamformerBase):
             self._fr[i] = 1


+@deprecated_alias({'max_iter': 'n_iter'})
 class BeamformerDamasPlus(BeamformerDamas):
-    """DAMAS deconvolution :cite:`Brooks2006` for solving the system of equations, instead of the
-    iterations, this class employs the NNLS or linear programming solvers from
-    scipy.optimize or one
-
+    """DAMAS deconvolution :cite:`Brooks2006` for solving the system of equations, instead of the
+    original Gauss-Seidel iterations, this class employs the NNLS or linear programming solvers from
+    scipy.optimize or one of several optimization algorithms from the scikit-learn module. Needs
+    a-priori delay-and-sum beamforming (:class:`BeamformerBase`).
     """

     #: Type of fit method to be used ('LassoLars',
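`deprecated_alias` comes from the new `acoular/deprecation.py` module (added in this release, but its contents are not part of this file's diff). A hypothetical sketch of what such a class decorator typically does, remapping old keyword names like `max_iter` to their replacements with a `DeprecationWarning`; this is an illustration, not the actual acoular implementation:

```python
import warnings
from functools import wraps


def deprecated_alias(aliases):
    """Map deprecated keyword names (e.g. 'max_iter') to their replacements (e.g. 'n_iter')."""
    def decorate(cls):
        original_init = cls.__init__

        @wraps(original_init)
        def __init__(self, *args, **kwargs):
            for old, new in aliases.items():
                if old in kwargs:
                    warnings.warn(f"'{old}' is deprecated, use '{new}'", DeprecationWarning, stacklevel=2)
                    kwargs[new] = kwargs.pop(old)
            original_init(self, *args, **kwargs)

        cls.__init__ = __init__
        return cls
    return decorate
```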
@@ -1472,7 +1253,7 @@ class BeamformerDamasPlus(BeamformerDamas):
     #: These methods are implemented in
     #: the `scikit-learn <http://scikit-learn.org/stable/user_guide.html>`_
     #: module or within scipy.optimize respectively.
-    method =
+    method = Enum('NNLS', 'LP', 'LassoLars', 'OMPCV', desc='method used for solving deconvolution problem')

     #: Weight factor for LassoLars method,
     #: defaults to 0.0.
@@ -1482,7 +1263,7 @@ class BeamformerDamasPlus(BeamformerDamas):
     #: Maximum number of iterations,
     #: tradeoff between speed and precision;
     #: defaults to 500
-
+    n_iter = Int(500, desc='maximum number of iterations')

     #: Unit multiplier for evaluating, e.g., nPa instead of Pa.
     #: Values are converted back before returning.
@@ -1492,7 +1273,7 @@ class BeamformerDamasPlus(BeamformerDamas):

     # internal identifier
     digest = Property(
-        depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES + ['alpha', 'method', '
+        depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES + ['alpha', 'method', 'n_iter', 'unit_mult'],
     )

     @cached_property
@@ -1552,19 +1333,17 @@ class BeamformerDamasPlus(BeamformerDamas):
                self._ac[i] = linprog(c=cT, A_ub=psf, b_ub=y).x / unit  # defaults to simplex method and non-negative x
             else:
                 if self.method == 'LassoLars':
-                    model = LassoLars(
-                        alpha=self.alpha * unit,
-                        max_iter=self.max_iter,
-                    )
+                    model = LassoLars(alpha=self.alpha * unit, max_iter=self.n_iter, positive=True)
                 elif self.method == 'OMPCV':
                     model = OrthogonalMatchingPursuitCV()
                 else:
                     msg = f'Method {self.method} not implemented.'
                     raise NotImplementedError(msg)
                 model.normalize = False
-                # from sklearn 1.2, normalize=True does not work the same way anymore and the
-                # with StandardScaler does scale in a different way, thus we
-                # ourselves to make results the same over
+                # from sklearn 1.2, normalize=True does not work the same way anymore and the
+                # pipeline approach with StandardScaler does scale in a different way, thus we
+                # monkeypatch the code and normalize ourselves to make results the same over
+                # different sklearn versions
                 norms = norm(psf, axis=0)
                 # get rid of annoying sklearn warnings that appear
                 # for sklearn<1.2 despite any settings
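The comment above describes normalizing the system matrix columns manually instead of relying on the `normalize=True` behaviour removed in newer scikit-learn. A minimal standalone sketch of that idea, with random stand-in data rather than the acoular code path:

```python
import numpy as np
from sklearn.linear_model import LassoLars

rng = np.random.default_rng(0)
A = rng.random((64, 128))          # stand-in for the PSF / transfer matrix
y = rng.random(64)                 # stand-in for the beamforming map data

norms = np.linalg.norm(A, axis=0)  # per-column norms
model = LassoLars(alpha=1e-3, max_iter=500, positive=True)
model.fit(A / norms, y)            # fit on the column-normalized matrix
x = model.coef_ / norms            # rescale the solution back to the original column scaling
```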
@@ -1590,7 +1369,7 @@ class BeamformerOrth(BeamformerBase):
     beamformer = Property()

     # private storage of beamformer instance
-    _beamformer =
+    _beamformer = Instance(BeamformerEig)

     #: List of components to consider, use this to directly set the eigenvalues
     #: used in the beamformer. Alternatively, set :attr:`n`.
@@ -1657,7 +1436,7 @@ class BeamformerOrth(BeamformerBase):

         """
         f = self._f
-
+        num_channels = self.freq_data.num_channels
         normfactor = self.sig_loss_norm()
         param_steer_type, steer_vector = self._beamformer_params()
         for i in ind:
@@ -1671,10 +1450,11 @@ class BeamformerOrth(BeamformerBase):
                     steer_vector(f[i]),
                     (ones(1), eve[:, n].reshape((-1, 1))),
                 )[0]
-                self._ac[i, beamformerOutput.argmax()] += eva[n] /
+                self._ac[i, beamformerOutput.argmax()] += eva[n] / num_channels
             self._fr[i] = 1


+@deprecated_alias({'n': 'n_iter'})
 class BeamformerCleansc(BeamformerBase):
     """CLEAN-SC deconvolution algorithm.

@@ -1683,8 +1463,8 @@ class BeamformerCleansc(BeamformerBase):
     """

     #: no of CLEAN-SC iterations
-    #: defaults to 0, i.e. automatic (max 2*
-
+    #: defaults to 0, i.e. automatic (max 2*num_channels)
+    n_iter = Int(0, desc='no of iterations')

     #: iteration damping factor
     #: defaults to 0.6
@@ -1696,7 +1476,7 @@ class BeamformerCleansc(BeamformerBase):
     stopn = Int(3, desc='stop criterion index')

     # internal identifier
-    digest = Property(depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES + ['
+    digest = Property(depends_on=BEAMFORMER_BASE_DIGEST_DEPENDENCIES + ['n_iter', 'damp', 'stopn'])

     @cached_property
     def _get_digest(self):
@@ -1722,9 +1502,9 @@ class BeamformerCleansc(BeamformerBase):
         """
         f = self._f
         normfactor = self.sig_loss_norm()
-
+        num_channels = self.freq_data.num_channels
         result = zeros((self.steer.grid.size), 'f')
-        J =
+        J = num_channels * 2 if not self.n_iter else self.n_iter
         powers = zeros(J, 'd')

         param_steer_type, steer_vector = self._beamformer_params()
@@ -1751,7 +1531,8 @@ class BeamformerCleansc(BeamformerBase):
                 hh = hh[:, newaxis]
                 csm1 = hmax * (hh * hh.conj().T)

-                # h1 = self.steer._beamformerCall(f[i], self.r_diag, normfactor,
+                # h1 = self.steer._beamformerCall(f[i], self.r_diag, normfactor, \
+                #      (array((hmax, ))[newaxis, :], hh[newaxis, :].conjugate()))[0]
                 h1 = beamformerFreq(
                     param_steer_type,
                     self.r_diag,
@@ -1777,10 +1558,10 @@ class BeamformerClean(BeamformerBase):
     beamformer = Property()

     # private storage of beamformer instance
-    _beamformer =
+    _beamformer = Instance(BeamformerBase)

     #: The floating-number-precision of the PSFs. Default is 64 bit.
-    psf_precision =
+    psf_precision = Enum('float64', 'float32', desc='precision of PSF.')

     # iteration damping factor
     # defaults to 0.6
@@ -1790,7 +1571,7 @@ class BeamformerClean(BeamformerBase):
     n_iter = Int(100, desc='maximum number of iterations')

     # how to calculate and store the psf
-    calcmode =
+    calcmode = Enum('block', 'full', 'single', 'readonly', desc='mode of psf calculation / storage')

     # internal identifier
     digest = Property(
@@ -1885,6 +1666,7 @@ class BeamformerClean(BeamformerBase):
             self._fr[i] = 1


+@deprecated_alias({'max_iter': 'n_iter'})
 class BeamformerCMF(BeamformerBase):
     """Covariance Matrix Fitting algorithm.

@@ -1897,7 +1679,7 @@ class BeamformerCMF(BeamformerBase):
     #: These methods are implemented in
     #: the `scikit-learn <http://scikit-learn.org/stable/user_guide.html>`_
     #: module.
-    method =
+    method = Enum(
         'LassoLars',
         'LassoLarsBIC',
         'OMPCV',
@@ -1913,10 +1695,11 @@ class BeamformerCMF(BeamformerBase):
     #: (Use values in the order of 10^⁻9 for good results.)
     alpha = Range(0.0, 1.0, 0.0, desc='Lasso weight factor')

-    #:
+    #: Total or maximum number of iterations
+    #: (depending on :attr:`method`),
     #: tradeoff between speed and precision;
     #: defaults to 500
-
+    n_iter = Int(500, desc='maximum number of iterations')

     #: Unit multiplier for evaluating, e.g., nPa instead of Pa.
     #: Values are converted back before returning.
@@ -1924,7 +1707,8 @@ class BeamformerCMF(BeamformerBase):
     #: within fitting method algorithms. Defaults to 1e9.
     unit_mult = Float(1e9, desc='unit multiplier')

-    #: If True, shows the status of the PyLops solver. Only relevant in case of FISTA or
+    #: If True, shows the status of the PyLops solver. Only relevant in case of FISTA or
+    #: Split_Bregman
     show = Bool(False, desc='show output of PyLops solvers')

     #: Energy normalization in case of diagonal removal not implemented for inverse methods.
@@ -1939,7 +1723,7 @@ class BeamformerCMF(BeamformerBase):
             'freq_data.digest',
             'alpha',
             'method',
-            '
+            'n_iter',
             'unit_mult',
             'r_diag',
             'precision',
@@ -1985,8 +1769,8 @@ class BeamformerCMF(BeamformerBase):
             return vstack([matrix.real, matrix.imag])

         # prepare calculation
-        nc = self.freq_data.
-
+        nc = self.freq_data.num_channels
+        num_points = self.steer.grid.size
         unit = self.unit_mult

         for i in ind:
@@ -1996,7 +1780,7 @@ class BeamformerCMF(BeamformerBase):

             # reduced Kronecker product (only where solution matrix != 0)
             Bc = (h[:, :, newaxis] * h.conjugate().T[newaxis, :, :]).transpose(2, 0, 1)
-            Ac = Bc.reshape(nc * nc,
+            Ac = Bc.reshape(nc * nc, num_points)

             # get indices for upper triangular matrices (use tril b/c transposed)
             ind = reshape(tril(ones((nc, nc))), (nc * nc,)) > 0
@@ -2015,24 +1799,25 @@ class BeamformerCMF(BeamformerBase):
             R = realify(reshape(csm.T, (nc * nc, 1))[ind, :])[ind_reim, :] * unit
             # choose method
             if self.method == 'LassoLars':
-                model = LassoLars(alpha=self.alpha * unit, max_iter=self.
+                model = LassoLars(alpha=self.alpha * unit, max_iter=self.n_iter, positive=True, **sklearn_ndict)
             elif self.method == 'LassoLarsBIC':
-                model = LassoLarsIC(criterion='bic', max_iter=self.
+                model = LassoLarsIC(criterion='bic', max_iter=self.n_iter, positive=True, **sklearn_ndict)
             elif self.method == 'OMPCV':
                 model = OrthogonalMatchingPursuitCV(**sklearn_ndict)
             elif self.method == 'NNLS':
                 model = LinearRegression(positive=True)

             if self.method == 'Split_Bregman' and config.have_pylops:
-                from pylops import Identity, MatrixMult
-
-
-
-                self.
-
-
-
-
+                from pylops import Identity, MatrixMult
+                from pylops.optimization.sparsity import splitbregman
+
+                Oop = MatrixMult(A)  # transfer operator
+                Iop = self.alpha * Identity(num_points)  # regularisation
+                self._ac[i], iterations, cost = splitbregman(
+                    Op=Oop,
+                    RegsL1=[Iop],
+                    y=R[:, 0],
+                    niter_outer=self.n_iter,
                     niter_inner=5,
                     RegsL2=None,
                     dataregsL2=None,
@@ -2045,17 +1830,16 @@ class BeamformerCMF(BeamformerBase):
                 self._ac[i] /= unit

             elif self.method == 'FISTA' and config.have_pylops:
-                from pylops import
+                from pylops import MatrixMult
+                from pylops.optimization.sparsity import fista

-                Oop = MatrixMult(A)  #
-                self._ac[i], iterations =
+                Oop = MatrixMult(A)  # transfer operator
+                self._ac[i], iterations, cost = fista(
                     Op=Oop,
-
-                    niter=self.
+                    y=R[:, 0],
+                    niter=self.n_iter,
                     eps=self.alpha,
                     alpha=None,
-                    eigsiter=None,
-                    eigstol=0,
                     tol=1e-10,
                     show=self.show,
                 )
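The pylops calls now use the function-style solvers (`splitbregman`, `fista`) from `pylops.optimization.sparsity`, which return `(x, niter, cost)`. A standalone sketch of the FISTA pattern, assuming pylops 2.x as the new imports suggest and using random stand-in data:

```python
import numpy as np
from pylops import MatrixMult
from pylops.optimization.sparsity import fista

rng = np.random.default_rng(0)
A = rng.standard_normal((200, 50))       # stand-in for the real-valued transfer matrix
x_true = np.zeros(50)
x_true[[3, 17, 41]] = [1.0, 0.5, 0.25]   # sparse source vector
y = A @ x_true

Oop = MatrixMult(A)                       # wrap the matrix as a pylops linear operator
x_est, niter, cost = fista(Op=Oop, y=y, niter=500, eps=1e-2, tol=1e-10, show=False)
```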
@@ -2070,9 +1854,9 @@ class BeamformerCMF(BeamformerBase):
                return func[0].T, der[:, 0]

            # initial guess
-            x0 = ones([
-            #
-
+            x0 = ones([num_points])
+            # boundaries - set to non negative
+            boundaries = tile((0, +inf), (len(x0), 1))

            # optimize
            self._ac[i], yval, dicts = fmin_l_bfgs_b(
@@ -2081,14 +1865,14 @@ class BeamformerCMF(BeamformerBase):
                fprime=None,
                args=(),
                approx_grad=0,
-                bounds=
+                bounds=boundaries,
                m=10,
                factr=10000000.0,
                pgtol=1e-05,
                epsilon=1e-08,
                iprint=-1,
                maxfun=15000,
-                maxiter=self.
+                maxiter=self.n_iter,
                disp=None,
                callback=None,
                maxls=20,
@@ -2096,11 +1880,12 @@ class BeamformerCMF(BeamformerBase):

            self._ac[i] /= unit
        else:
-            # from sklearn 1.2, normalize=True does not work the same way anymore and the
-            # approach with StandardScaler does scale in a different way, thus we
-            # code and normalize ourselves to make results the same over
+            # from sklearn 1.2, normalize=True does not work the same way anymore and the
+            # pipeline approach with StandardScaler does scale in a different way, thus we
+            # monkeypatch the code and normalize ourselves to make results the same over
+            # different sklearn versions
            norms = norm(A, axis=0)
-            # get rid of
+            # get rid of sklearn warnings that appear for sklearn<1.2 despite any settings
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', category=FutureWarning)
                # normalized A
@@ -2110,6 +1895,7 @@ class BeamformerCMF(BeamformerBase):
            self._fr[i] = 1


+@deprecated_alias({'max_iter': 'n_iter'})
 class BeamformerSODIX(BeamformerBase):
     """Source directivity modeling in the cross-spectral matrix (SODIX) algorithm.

@@ -2119,12 +1905,12 @@ class BeamformerSODIX(BeamformerBase):
     #: Type of fit method to be used ('fmin_l_bfgs_b').
     #: These methods are implemented in
     #: the scipy module.
-    method =
+    method = Enum('fmin_l_bfgs_b', desc='fit method used')

     #: Maximum number of iterations,
     #: tradeoff between speed and precision;
     #: defaults to 200
-
+    n_iter = Int(200, desc='maximum number of iterations')

     #: Weight factor for regularization,
     #: defaults to 0.0.
@@ -2148,7 +1934,7 @@ class BeamformerSODIX(BeamformerBase):
             'freq_data.digest',
             'alpha',
             'method',
-            '
+            'n_iter',
             'unit_mult',
             'r_diag',
             'precision',
@@ -2180,7 +1966,7 @@ class BeamformerSODIX(BeamformerBase):
         """
         # prepare calculation
         f = self._f
-
+        num_points = self.steer.grid.size
         # unit = self.unit_mult
         num_mics = self.steer.mics.num_mics
         # SODIX needs special treatment as the result from one frequency is used to
@@ -2201,18 +1987,18 @@ class BeamformerSODIX(BeamformerBase):
            """Parameters
            ----------
            directions
-                [
+                [num_points*num_mics]

            Returns
            -------
            func - Sodix function to optimize
                [1]
            derdrl - derivitaives in direction of D
-                [num_mics*
+                [num_mics*num_points].

            """
            #### the sodix function ####
-            Djm = directions.reshape([
+            Djm = directions.reshape([num_points, num_mics])
            p = h.T * Djm
            csm_mod = dot(p.T, p.conj())
            Q = csm - csm_mod
@@ -2230,33 +2016,33 @@ class BeamformerSODIX(BeamformerBase):

            ##### initial guess ####
            if not self._fr[(i - 1)]:
-                D0 = ones([
+                D0 = ones([num_points, num_mics])
            else:
                D0 = sqrt(
                    self._ac[(i - 1)]
                    * real(trace(csm) / trace(array(self.freq_data.csm[i - 1], dtype='complex128', copy=1))),
                )

-            #
-
+            # boundaries - set to non negative [2*(num_points*num_mics)]
+            boundaries = tile((0, +inf), (num_points * num_mics, 1))

            # optimize with gradient solver
            # see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html

-            qi = ones([
+            qi = ones([num_points, num_mics])
            qi, yval, dicts = fmin_l_bfgs_b(
                function,
                D0,
                fprime=None,
                args=(),
                approx_grad=0,
-                bounds=
+                bounds=boundaries,
                factr=100.0,
                pgtol=1e-12,
                epsilon=1e-08,
                iprint=-1,
                maxfun=1500000,
-                maxiter=self.
+                maxiter=self.n_iter,
                disp=-1,
                callback=None,
                maxls=20,
@@ -2268,6 +2054,7 @@ class BeamformerSODIX(BeamformerBase):
            self._fr[i] = 1


+@deprecated_alias({'max_iter': 'n_iter'})
 class BeamformerGIB(BeamformerEig):  # BeamformerEig #BeamformerBase
     """Beamforming GIB methods with different normalizations.

@@ -2280,17 +2067,18 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
     #: within fitting method algorithms. Defaults to 1e9.
     unit_mult = Float(1e9, desc='unit multiplier')

-    #:
+    #: Total or maximum number of iterations
+    #: (depending on :attr:`method`),
     #: tradeoff between speed and precision;
     #: defaults to 10
-
+    n_iter = Int(10, desc='maximum number of iterations')

     #: Type of fit method to be used ('Suzuki', 'LassoLars', 'LassoLarsCV', 'LassoLarsBIC',
     #: 'OMPCV' or 'NNLS', defaults to 'Suzuki').
     #: These methods are implemented in
     #: the `scikit-learn <http://scikit-learn.org/stable/user_guide.html>`_
     #: module.
-    method =
+    method = Enum(
         'Suzuki',
         'InverseIRLS',
         'LassoLars',
@@ -2336,7 +2124,7 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
            'precision',
            'alpha',
            'method',
-            '
+            'n_iter',
            'unit_mult',
            'eps_perc',
            'pnorm',
@@ -2379,13 +2167,13 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
        f = self._f
        n = int(self.na)  # number of eigenvalues
        m = int(self.m)  # number of first eigenvalue
-
-
-        hh = zeros((1,
+        num_channels = self.freq_data.num_channels  # number of channels
+        num_points = self.steer.grid.size
+        hh = zeros((1, num_points, num_channels), dtype='D')

        # Generate a cross spectral matrix, and perform the eigenvalue decomposition
        for i in ind:
-            # for monopole and source
+            # for monopole and source strength Q needs to define density
            # calculate a transfer matrix A
            hh = self.steer.transfer(f[i])
            A = hh.T
@@ -2394,33 +2182,34 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
            eva, eve = eigh(csm)
            eva = eva[::-1]
            eve = eve[:, ::-1]
-
+            # set small values zo 0, lowers numerical errors in simulated data
+            eva[eva < max(eva) / 1e12] = 0
            # init sources
-            qi = zeros([n + m,
-            # Select the number of coherent modes to be processed referring to the eigenvalue
-            #
+            qi = zeros([n + m, num_points], dtype='complex128')
+            # Select the number of coherent modes to be processed referring to the eigenvalue
+            # distribution.
            for s in list(range(m, n + m)):
                if eva[s] > 0:
                    # Generate the corresponding eigenmodes
                    emode = array(sqrt(eva[s]) * eve[:, s], dtype='complex128')
                    # choose method for computation
                    if self.method == 'Suzuki':
-                        leftpoints =
-                        locpoints = arange(
-                        weights = diag(ones(
-                        epsilon = arange(self.
-                        for it in arange(self.
-                            if
+                        leftpoints = num_points
+                        locpoints = arange(num_points)
+                        weights = diag(ones(num_points))
+                        epsilon = arange(self.n_iter)
+                        for it in arange(self.n_iter):
+                            if num_channels <= leftpoints:
                                AWA = dot(dot(A[:, locpoints], weights), A[:, locpoints].conj().T)
                                epsilon[it] = max(absolute(eigvals(AWA))) * self.eps_perc
                                qi[s, locpoints] = dot(
                                    dot(
                                        dot(weights, A[:, locpoints].conj().T),
-                                        inv(AWA + eye(
+                                        inv(AWA + eye(num_channels) * epsilon[it]),
                                    ),
                                    emode,
                                )
-                            elif
+                            elif num_channels > leftpoints:
                                AA = dot(A[:, locpoints].conj().T, A[:, locpoints])
                                epsilon[it] = max(absolute(eigvals(AA))) * self.eps_perc
                                qi[s, locpoints] = dot(
@@ -2428,8 +2217,10 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
                                    emode,
                                )
                            if self.beta < 1 and it > 1:
-                                # Reorder from the greatest to smallest magnitude to define a
-
+                                # Reorder from the greatest to smallest magnitude to define a
+                                # reduced-point source distribution, and reform a reduced transfer
+                                # matrix
+                                leftpoints = int(round(num_points * self.beta ** (it + 1)))
                                idx = argsort(abs(qi[s, locpoints]))[::-1]
                                # print(it, leftpoints, locpoints, idx )
                                locpoints = delete(locpoints, [idx[leftpoints::]])
@@ -2441,33 +2232,33 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
                        weights = diag(absolute(qi[s, :]) ** (2 - self.pnorm))

                    elif self.method == 'InverseIRLS':
-                        weights = eye(
-                        locpoints = arange(
-                        for _it in arange(self.
-                            if
+                        weights = eye(num_points)
+                        locpoints = arange(num_points)
+                        for _it in arange(self.n_iter):
+                            if num_channels <= num_points:
                                wtwi = inv(dot(weights.T, weights))
                                aH = A.conj().T
                                qi[s, :] = dot(dot(wtwi, aH), dot(inv(dot(A, dot(wtwi, aH))), emode))
                                weights = diag(absolute(qi[s, :]) ** ((2 - self.pnorm) / 2))
                                weights = weights / sum(absolute(weights))
-                            elif
+                            elif num_channels > num_points:
                                wtw = dot(weights.T, weights)
                                qi[s, :] = dot(dot(inv(dot(dot(A.conj.T, wtw), A)), dot(A.conj().T, wtw)), emode)
                                weights = diag(absolute(qi[s, :]) ** ((2 - self.pnorm) / 2))
                                weights = weights / sum(absolute(weights))
                    else:
-                        locpoints = arange(
+                        locpoints = arange(num_points)
                        unit = self.unit_mult
                        AB = vstack([hstack([A.real, -A.imag]), hstack([A.imag, A.real])])
                        R = hstack([emode.real.T, emode.imag.T]) * unit
                        if self.method == 'LassoLars':
-                            model = LassoLars(alpha=self.alpha * unit, max_iter=self.
+                            model = LassoLars(alpha=self.alpha * unit, max_iter=self.n_iter, positive=True)
                        elif self.method == 'LassoLarsBIC':
-                            model = LassoLarsIC(criterion='bic', max_iter=self.
+                            model = LassoLarsIC(criterion='bic', max_iter=self.n_iter, positive=True)
                        elif self.method == 'OMPCV':
                            model = OrthogonalMatchingPursuitCV()
                        elif self.method == 'LassoLarsCV':
-                            model = LassoLarsCV()
+                            model = LassoLarsCV(max_iter=self.n_iter, positive=True)
                        elif self.method == 'NNLS':
                            model = LinearRegression(positive=True)
                        model.normalize = False
@@ -2494,15 +2285,16 @@ class BeamformerGIB(BeamformerEig): # BeamformerEig #BeamformerBase
                    Warning,
                    stacklevel=2,
                )
-            # Generate source maps of all selected eigenmodes, and superpose source intensity
-
+            # Generate source maps of all selected eigenmodes, and superpose source intensity
+            # for each source type.
+            temp = zeros(num_points)
            temp[locpoints] = sum(absolute(qi[:, locpoints]) ** 2, axis=0)
            self._ac[i] = temp
            self._fr[i] = 1


 class BeamformerAdaptiveGrid(BeamformerBase, Grid):
-    """
+    """Abstract base class for array methods without predefined grid."""

     # the grid positions live in a shadow trait
     _gpos = Any
@@ -2510,7 +2302,7 @@ class BeamformerAdaptiveGrid(BeamformerBase, Grid):
     def _get_shape(self):
         return (self.size,)

-    def
+    def _get_pos(self):
         return self._gpos

     def integrate(self, sector):
@@ -2561,7 +2353,7 @@ class BeamformerGridlessOrth(BeamformerAdaptiveGrid):
     n = Int(1)

     #: Geometrical bounds of the search domain to consider.
-    #: :attr:`bound`
+    #: :attr:`bound` is a list that contains exactly three tuple of
     #: (min,max) for each of the coordinates x, y, z.
     #: Defaults to [(-1.,1.),(-1.,1.),(0.01,1.)]
     bounds = List(Tuple(Float, Float), minlen=3, maxlen=3, value=[(-1.0, 1.0), (-1.0, 1.0), (0.01, 1.0)])
@@ -2582,7 +2374,7 @@ class BeamformerGridlessOrth(BeamformerAdaptiveGrid):

     # internal identifier
     digest = Property(
-        depends_on=['freq_data.digest', '
+        depends_on=['freq_data.digest', 'steer.digest', 'precision', 'r_diag', 'eva_list', 'bounds', 'shgo'],
     )

     @cached_property
@@ -2618,14 +2410,14 @@ class BeamformerGridlessOrth(BeamformerAdaptiveGrid):
        """
        f = self._f
        normfactor = self.sig_loss_norm()
-
+        num_channels = self.freq_data.num_channels
        # eigenvalue number list in standard form from largest to smallest
        eva_list = unique(self.eva_list % self.steer.mics.num_mics)[::-1]
        steer_type = self.steer.steer_type
        if steer_type == 'custom':
            msg = 'custom steer_type is not implemented'
            raise NotImplementedError(msg)
-        mpos = self.steer.mics.
+        mpos = self.steer.mics.pos
        env = self.steer.env
        shgo_opts = {
            'n': 256,
@@ -2669,8 +2461,8 @@ class BeamformerGridlessOrth(BeamformerAdaptiveGrid):
                # store result for position
                self._gpos[:, i1] = oR['x']
                # store result for level
-                self._ac[i, i1] = eva[n] /
-                # print(oR['x'],eva[n]/
+                self._ac[i, i1] = eva[n] / num_channels
+                # print(oR['x'],eva[n]/num_channels,oR)
            self._fr[i] = 1
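For the gridless variant, the search region is given by the `bounds` trait rather than a grid object. A sketch of setting it up with the traits visible above, reusing the `ps` and `st` objects from the earlier BeamformerBase example; any further solver options (e.g. `shgo`) are left at their defaults since they are not shown in this diff:

```python
import acoular as ac

bfg = ac.BeamformerGridlessOrth(
    freq_data=ps,
    steer=st,
    n=3,                                             # number of eigenvalues / sources to localize
    bounds=[(-0.5, 0.5), (-0.5, 0.5), (0.1, 0.5)],   # (min, max) for x, y, z
)
# results are then accessed through the usual `result` / `synthetic` interface
```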