acoular 25.10.tar.gz → 26.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {acoular-25.10 → acoular-26.1}/PKG-INFO +53 -108
- acoular-26.1/README.md +129 -0
- {acoular-25.10 → acoular-26.1}/acoular/aiaa/aiaa.py +1 -1
- {acoular-25.10 → acoular-26.1}/acoular/base.py +7 -7
- {acoular-25.10 → acoular-26.1}/acoular/calib.py +6 -6
- acoular-25.10/acoular/demo/acoular_demo.py → acoular-26.1/acoular/demo/__init__.py +107 -135
- acoular-26.1/acoular/demo/__main__.py +37 -0
- {acoular-25.10 → acoular-26.1}/acoular/environments.py +24 -24
- {acoular-25.10 → acoular-26.1}/acoular/fbeamform.py +145 -142
- {acoular-25.10 → acoular-26.1}/acoular/fprocess.py +11 -9
- {acoular-25.10 → acoular-26.1}/acoular/grids.py +45 -211
- {acoular-25.10 → acoular-26.1}/acoular/microphones.py +8 -8
- {acoular-25.10 → acoular-26.1}/acoular/process.py +7 -14
- {acoular-25.10 → acoular-26.1}/acoular/sdinput.py +9 -9
- {acoular-25.10 → acoular-26.1}/acoular/signals.py +10 -10
- {acoular-25.10 → acoular-26.1}/acoular/sources.py +84 -68
- {acoular-25.10 → acoular-26.1}/acoular/spectra.py +27 -36
- {acoular-25.10 → acoular-26.1}/acoular/tbeamform.py +26 -26
- {acoular-25.10 → acoular-26.1}/acoular/tools/helpers.py +1 -1
- acoular-26.1/acoular/tools/utils.py +283 -0
- {acoular-25.10 → acoular-26.1}/acoular/tprocess.py +76 -63
- {acoular-25.10 → acoular-26.1}/acoular/trajectory.py +1 -2
- {acoular-25.10 → acoular-26.1}/acoular/version.py +2 -2
- {acoular-25.10 → acoular-26.1}/pyproject.toml +31 -53
- acoular-25.10/README.md +0 -133
- acoular-25.10/acoular/demo/__init__.py +0 -19
- acoular-25.10/acoular/tools/utils.py +0 -115
- {acoular-25.10 → acoular-26.1}/.gitignore +0 -0
- {acoular-25.10 → acoular-26.1}/AUTHORS.rst +0 -0
- {acoular-25.10 → acoular-26.1}/LICENSE +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/__init__.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/aiaa/__init__.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/configuration.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/deprecation.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/fastFuncs.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/h5cache.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/h5files.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/internal.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/tfastfuncs.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/tools/__init__.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/tools/metrics.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/traitsviews.py +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/HW90D240_f10.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/W90_D105_f10.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/acousticam_2c.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/acousticam_4c.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/array38.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/array92x.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/array_56.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/array_56_10_9.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/array_56_bomb.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/array_56_v2.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/array_64.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/array_84_10_9.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/array_84_bomb_v3.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/calib_vw_ring32.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/gfai_ring32.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/minidsp_uma-16.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/minidsp_uma-16_mirrored.xml +0 -0
- {acoular-25.10 → acoular-26.1}/acoular/xml/tub_vogel64.xml +0 -0
{acoular-25.10 → acoular-26.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: acoular
-Version: 25.10
+Version: 26.1
 Summary: Python library for acoustic beamforming
 Project-URL: homepage, https://acoular.org
 Project-URL: documentation, https://acoular.org
@@ -51,65 +51,14 @@ Requires-Python: <3.14,>=3.10
 Requires-Dist: numba
 Requires-Dist: numpy
 Requires-Dist: scikit-learn
-Requires-Dist: scipy!=1.16
+Requires-Dist: scipy!=1.16.0,>=1.15; python_version == '3.10'
+Requires-Dist: scipy>=1.16.1; python_version > '3.10'
 Requires-Dist: tables
 Requires-Dist: traits>=6.0
-Provides-Extra: dev
-Requires-Dist: graphviz; extra == 'dev'
-Requires-Dist: h5py; extra == 'dev'
-Requires-Dist: hatch; extra == 'dev'
-Requires-Dist: ipython; extra == 'dev'
-Requires-Dist: matplotlib; extra == 'dev'
-Requires-Dist: numpydoc; extra == 'dev'
-Requires-Dist: pickleshare; extra == 'dev'
-Requires-Dist: pydata-sphinx-theme; extra == 'dev'
-Requires-Dist: pylops; extra == 'dev'
-Requires-Dist: pytest; extra == 'dev'
-Requires-Dist: pytest-cases; extra == 'dev'
-Requires-Dist: pytest-cov; extra == 'dev'
-Requires-Dist: pytest-env; extra == 'dev'
-Requires-Dist: pytest-mock; extra == 'dev'
-Requires-Dist: pytest-profiling; extra == 'dev'
-Requires-Dist: pytest-regtest; extra == 'dev'
-Requires-Dist: pyyaml; extra == 'dev'
-Requires-Dist: ruff==0.8.1; extra == 'dev'
-Requires-Dist: setuptools; extra == 'dev'
-Requires-Dist: sounddevice; extra == 'dev'
-Requires-Dist: sphinx; extra == 'dev'
-Requires-Dist: sphinx-copybutton; extra == 'dev'
-Requires-Dist: sphinx-gallery; extra == 'dev'
-Requires-Dist: sphinxcontrib-bibtex; extra == 'dev'
-Requires-Dist: traitsui; extra == 'dev'
-Provides-Extra: docs
-Requires-Dist: graphviz; extra == 'docs'
-Requires-Dist: ipython; extra == 'docs'
-Requires-Dist: matplotlib; extra == 'docs'
-Requires-Dist: numpydoc; extra == 'docs'
-Requires-Dist: pickleshare; extra == 'docs'
-Requires-Dist: pydata-sphinx-theme; extra == 'docs'
-Requires-Dist: setuptools; extra == 'docs'
-Requires-Dist: sounddevice; extra == 'docs'
-Requires-Dist: sphinx; extra == 'docs'
-Requires-Dist: sphinx-copybutton; extra == 'docs'
-Requires-Dist: sphinx-gallery; extra == 'docs'
-Requires-Dist: sphinxcontrib-bibtex; extra == 'docs'
 Provides-Extra: full
 Requires-Dist: matplotlib; extra == 'full'
 Requires-Dist: pylops; extra == 'full'
 Requires-Dist: sounddevice; extra == 'full'
-Provides-Extra: tests
-Requires-Dist: h5py; extra == 'tests'
-Requires-Dist: pylops; extra == 'tests'
-Requires-Dist: pytest; extra == 'tests'
-Requires-Dist: pytest-cases; extra == 'tests'
-Requires-Dist: pytest-cov; extra == 'tests'
-Requires-Dist: pytest-env; extra == 'tests'
-Requires-Dist: pytest-mock; extra == 'tests'
-Requires-Dist: pytest-profiling; extra == 'tests'
-Requires-Dist: pytest-regtest; extra == 'tests'
-Requires-Dist: pyyaml; extra == 'tests'
-Requires-Dist: sounddevice; extra == 'tests'
-Requires-Dist: traitsui; extra == 'tests'
 Description-Content-Type: text/markdown

 
@@ -117,45 +66,57 @@ Description-Content-Type: text/markdown
 [](https://pypi.org/project/acoular)
 [](https://pypi.org/project/acoular)
 [](https://github.com/acoular/acoular/actions)
-[](https://zenodo.org/doi/10.5281/zenodo.3690794)

 # Acoular
-Acoular is a Python module for acoustic beamforming that is distributed under the
-
-It is aimed at applications in acoustic testing. Multichannel data recorded by
-
-
-
--
--
--
--
--
+Acoular is a Python module for acoustic beamforming that is distributed under the [BSD 3-clause license](LICENSE).
+
+It is aimed at (but not limited to) applications in acoustic testing. Multichannel data recorded by microphone arrays can be processed and analyzed to generate mappings of sound source distributions. The maps (acoustic photographs) can then be used to locate sources of interest and to characterize them using their spectra.
+
+👁️📢 Please consider taking the [**Acoular User Survey**](https://www.soscisurvey.de/acoularsurvey). It only takes 2 minutes.
+
+- **Website:** https://acoular.org
+- **Blog:** https://blog.acoular.org
+- **Installation:** https://acoular.org/install
+- **Getting Started** https://acoular.org/user_guide/get_started.html
+- **User Guide:** https://acoular.org/user_guide
+- **API Reference:** https://acoular.org/api_ref
+- **Examples:** https://acoular.org/auto_examples
+- **Contributing:** https://acoular.org/contributing
+- **Questions?:** https://github.com/orgs/acoular/discussions
+- **Bug Reports:** https://github.com/acoular/acoular/issues
+- **Report a Security Vulnerability:** https://github.com/acoular/acoular/security/advisories/new
+
+## Highlights
+- frequency domain methods:
+- **beamforming:** delay & sum, Capon (adaptive), MUSIC, functional and eigenvalue beamforming
+- **deconvolution:** DAMAS, DAMAS+, Clean, CleanSC, (gridless) orthogonal deconvolution
+- **inverse methods:** CMF (covariance matrix fitting), general inverse beamforming, SODIX
+- time domain methods:
+- **beamforming:** delay & sum
+- **deconvolution:** CleanT
 - 1D, 2D and 3D mapping grids for all methods
--
--
--
--
--
--
--
--
--
--
--
-
-
-
-
-# Citing
-
+- arbitrary stationary background 🌬️ **flow** can be considered for all methods
+- frequency domain methods for 🌀 **rotating sources** via virtual array rotation for arbitrary arrays
+- all time domain methods can identify 🚂🛩️ **moving sources** with arbitrary trajectory
+- flexible & modular 🧮 **signal processing**:
+- n-th octave band filters
+- fast, slow, and impulse weighting
+- A-, C-, and Z-weighting
+- filter bank
+- linear phase filters
+- intelligent and transparent :floppy_disk: **caching**: computed results are automatically saved and loaded on the next run to avoid unnecessary re-computation.
+- 🦥 **lazy** evaluation: while processing blocks are set up at any time, (expensive) computations are only performed when needed.
+- 🏎️ **efficient & parallel** (multithreaded) computation with [Numba](https://numba.pydata.org) for most algorithms.
+
+## Citing
 If you use Acoular for academic work, please consider citing both our
 [publication](https://doi.org/10.1016/j.apacoust.2016.09.015):

 Sarradj, E., & Herold, G. (2017).
 A Python framework for microphone array data processing.
 Applied Acoustics, 116, 50–58.
-https://doi.org/10.1016/j.apacoust.2016.09
+https://doi.org/10.1016/j.apacoust.2016.09.015

 and our [software](https://zenodo.org/doi/10.5281/zenodo.3690794):

@@ -163,41 +124,26 @@ and our [software](https://zenodo.org/doi/10.5281/zenodo.3690794):
 Acoular – Acoustic testing and source mapping software.
 Zenodo. https://zenodo.org/doi/10.5281/zenodo.3690794

-
-Acoular runs under Linux, Windows and MacOS and needs Numpy, Scipy, Traits, scikit-learn, pytables, Numba packages available.
-Matplotlib is needed for some of the examples.
-
-If you want to use input from a soundcard, you will also need to install the [sounddevice](https://python-sounddevice.readthedocs.io/en/0.3.12/installation.html) package. Some solvers for the CMF method need [Pylops](https://pylops.readthedocs.io/en/stable/installation.html).
+## Installation

-
+Acoular can be installed from [PyPI](https://pypi.org/project/acoular). It is recommended to use a [virtual environment](https://docs.python.org/3/library/venv.html). Inside the environment, run

-
+pip install acoular
+
+A second option is to install Acoular with [conda](https://docs.conda.io/en/latest/). It is recommended to install into a dedicated [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). After activating the environment, run

 conda install -c acoular acoular

-This will install Acoular in your Anaconda Python environment and make the Acoular library available from Python. In addition, this will install all dependencies (those other packages mentioned above) if they are not already present on your system.
-
-A second option is to install Acoular via [pip](https://pip.pypa.io/en/stable/). It is recommended to use a dedicated [virtual environment](https://virtualenv.pypa.io/en/latest/) and then run
-
-pip install acoular
-
 For more detailed installation instructions, see the [documentation](https://acoular.org/install/index.html).

-
+## Documentation and help
 Documentation is available [here](https://acoular.org) with a
-[getting started](https://acoular.org/get_started
+[getting started](https://www.acoular.org/user_guide/get_started.html) section and
 [examples](https://acoular.org/auto_examples/index.html).

-The Acoular [blog](https://acoular.github.io/blog/) contains some tutorials.
-
 If you discover problems with the Acoular software, please report them using the [issue tracker](https://github.com/acoular/acoular/issues) on GitHub. Please use the [Acoular discussions forum](https://github.com/acoular/acoular/discussions) for practical questions, discussions, and demos.

-
-
-We are always happy to welcome new contributors to the project.
-If you are interested in contributing, have a look at the [CONTRIBUTING.md](CONTRIBUTING.md) file.
-
-# Example
+## Example
 This reads data from 64 microphone channels and computes a beamforming map for the 8kHz third octave band:

 ```python
@@ -244,4 +190,3 @@ plt.show()

 

-
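The metadata changes above (scipy pinned per Python version, the dev/docs/tests extras no longer published, only 'full' remaining) can be verified against an installed acoular 26.1. A minimal sketch using only the standard library; the exact marker formatting in the output may differ from the PKG-INFO text:

```python
# Sketch (not part of the diff): inspect the published metadata of an installed
# acoular 26.1 with importlib.metadata from the standard library.
from importlib.metadata import metadata, requires

meta = metadata('acoular')
print(meta['Version'])                 # expected: 26.1
print(meta.get_all('Provides-Extra'))  # expected: only ['full']

# the two Python-version-dependent scipy pins introduced in this release
for req in requires('acoular') or []:
    if req.startswith('scipy'):
        print(req)
```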
acoular-26.1/README.md ADDED

@@ -0,0 +1,129 @@
+
+
+[](https://pypi.org/project/acoular)
+[](https://pypi.org/project/acoular)
+[](https://github.com/acoular/acoular/actions)
+[](https://zenodo.org/doi/10.5281/zenodo.3690794)
+
+# Acoular
+Acoular is a Python module for acoustic beamforming that is distributed under the [BSD 3-clause license](LICENSE).
+
+It is aimed at (but not limited to) applications in acoustic testing. Multichannel data recorded by microphone arrays can be processed and analyzed to generate mappings of sound source distributions. The maps (acoustic photographs) can then be used to locate sources of interest and to characterize them using their spectra.
+
+👁️📢 Please consider taking the [**Acoular User Survey**](https://www.soscisurvey.de/acoularsurvey). It only takes 2 minutes.
+
+- **Website:** https://acoular.org
+- **Blog:** https://blog.acoular.org
+- **Installation:** https://acoular.org/install
+- **Getting Started** https://acoular.org/user_guide/get_started.html
+- **User Guide:** https://acoular.org/user_guide
+- **API Reference:** https://acoular.org/api_ref
+- **Examples:** https://acoular.org/auto_examples
+- **Contributing:** https://acoular.org/contributing
+- **Questions?:** https://github.com/orgs/acoular/discussions
+- **Bug Reports:** https://github.com/acoular/acoular/issues
+- **Report a Security Vulnerability:** https://github.com/acoular/acoular/security/advisories/new
+
+## Highlights
+- frequency domain methods:
+- **beamforming:** delay & sum, Capon (adaptive), MUSIC, functional and eigenvalue beamforming
+- **deconvolution:** DAMAS, DAMAS+, Clean, CleanSC, (gridless) orthogonal deconvolution
+- **inverse methods:** CMF (covariance matrix fitting), general inverse beamforming, SODIX
+- time domain methods:
+- **beamforming:** delay & sum
+- **deconvolution:** CleanT
+- 1D, 2D and 3D mapping grids for all methods
+- arbitrary stationary background 🌬️ **flow** can be considered for all methods
+- frequency domain methods for 🌀 **rotating sources** via virtual array rotation for arbitrary arrays
+- all time domain methods can identify 🚂🛩️ **moving sources** with arbitrary trajectory
+- flexible & modular 🧮 **signal processing**:
+- n-th octave band filters
+- fast, slow, and impulse weighting
+- A-, C-, and Z-weighting
+- filter bank
+- linear phase filters
+- intelligent and transparent :floppy_disk: **caching**: computed results are automatically saved and loaded on the next run to avoid unnecessary re-computation.
+- 🦥 **lazy** evaluation: while processing blocks are set up at any time, (expensive) computations are only performed when needed.
+- 🏎️ **efficient & parallel** (multithreaded) computation with [Numba](https://numba.pydata.org) for most algorithms.
+
+## Citing
+If you use Acoular for academic work, please consider citing both our
+[publication](https://doi.org/10.1016/j.apacoust.2016.09.015):
+
+Sarradj, E., & Herold, G. (2017).
+A Python framework for microphone array data processing.
+Applied Acoustics, 116, 50–58.
+https://doi.org/10.1016/j.apacoust.2016.09.015
+
+and our [software](https://zenodo.org/doi/10.5281/zenodo.3690794):
+
+Sarradj, E., Herold, G., Kujawski, A., Jekosch, S., Pelling, A. J. R., Czuchaj, M., Gensch, T., & Oertwig, S..
+Acoular – Acoustic testing and source mapping software.
+Zenodo. https://zenodo.org/doi/10.5281/zenodo.3690794
+
+## Installation
+
+Acoular can be installed from [PyPI](https://pypi.org/project/acoular). It is recommended to use a [virtual environment](https://docs.python.org/3/library/venv.html). Inside the environment, run
+
+pip install acoular
+
+A second option is to install Acoular with [conda](https://docs.conda.io/en/latest/). It is recommended to install into a dedicated [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). After activating the environment, run
+
+conda install -c acoular acoular
+
+For more detailed installation instructions, see the [documentation](https://acoular.org/install/index.html).
+
+## Documentation and help
+Documentation is available [here](https://acoular.org) with a
+[getting started](https://www.acoular.org/user_guide/get_started.html) section and
+[examples](https://acoular.org/auto_examples/index.html).
+
+If you discover problems with the Acoular software, please report them using the [issue tracker](https://github.com/acoular/acoular/issues) on GitHub. Please use the [Acoular discussions forum](https://github.com/acoular/acoular/discussions) for practical questions, discussions, and demos.
+
+## Example
+This reads data from 64 microphone channels and computes a beamforming map for the 8kHz third octave band:
+
+```python
+from pathlib import Path
+
+import acoular as ac
+import matplotlib.pylab as plt
+
+# this file contains the microphone coordinates
+micgeofile = Path(ac.__file__).parent / 'xml' / 'array_64.xml'
+# set up object managing the microphone coordinates
+mg = ac.MicGeom( file=micgeofile )
+# generate test data, in real life this would come from an array measurement
+p = ac.demo.create_three_sources(mg, h5savefile='three_sources.h5')
+# set up object managing the microphone array data (usually from measurement)
+ts = ac.TimeSamples( file='three_sources.h5')
+# set up object managing the cross spectral matrix computation
+ps = ac.PowerSpectra( source=ts, block_size=128, window='Hanning' )
+# alternatively, you can use the in-memory Mixer object directly:
+# ps = ac.PowerSpectra( source=p, block_size=128, window='Hanning' )
+# set up object managing the mapping grid
+rg = ac.RectGrid( x_min=-0.2, x_max=0.2, y_min=-0.2, y_max=0.2, z=-0.3, \
+increment=0.01 )
+# set up steering vector, implicitely contains also the standard quiescent
+# environment with standard speed of sound
+st = ac.SteeringVector( grid = rg, mics=mg )
+# set up the object managing the delay & sum beamformer
+bb = ac.BeamformerBase( freq_data=ps, steer=st )
+# request the result in the 8kHz third octave band from approriate FFT-Lines
+# this starts the actual computation (data intake, FFT, Welch CSM, beamforming)
+pm = bb.synthetic( 8000, 3 )
+# compute the sound pressure level
+Lm = ac.L_p( pm )
+# plot the map
+plt.imshow( Lm.T, origin='lower', vmin=Lm.max()-10, extent=rg.extent, \
+interpolation='bicubic')
+plt.title('Beamformer (base) for 3 sources measured for 8000 Hz')
+plt.xlabel('x in m')
+plt.ylabel('y in m')
+plt.colorbar(label=r'$L_p$')
+plt.savefig('three_sources.png', dpi=300, bbox_inches='tight')
+plt.show()
+```
+
+
+
{acoular-25.10 → acoular-26.1}/acoular/aiaa/aiaa.py

@@ -102,7 +102,7 @@ class CsmAIAABenchmark(PowerSpectraImport):
     #: HDF5 file object
     h5f = Instance(H5FileBase, transient=True)

-
+    #: A unique identifier for the CSM importer, based on its properties. (read-only)
    digest = Property(depends_on=['basename', '_csmsum'])

     @cached_property
{acoular-25.10 → acoular-26.1}/acoular/base.py

@@ -55,7 +55,7 @@ class Generator(ABCHasStrictTraits):
     """

     #: Sampling frequency of the signal, defaults to 1.0
-    sample_freq = Float(1.0
+    sample_freq = Float(1.0)

     #: Number of signal samples
     num_samples = CInt
@@ -63,7 +63,7 @@ class Generator(ABCHasStrictTraits):
     #: Number of channels
     num_channels = CInt

-
+    #: A unique identifier for the generator, based on its properties. (read-only)
     digest = Property(depends_on=['sample_freq', 'num_samples', 'num_channels'])

     def _get_digest(self):
@@ -96,7 +96,7 @@ class SamplesGenerator(Generator):

     """

-
+    #: A unique identifier for the generator, based on its properties. (read-only)
     digest = Property(depends_on=['sample_freq', 'num_samples', 'num_channels'])

     def _get_digest(self):
@@ -137,7 +137,7 @@ class SpectraGenerator(Generator):
     #: The length of the block used to calculate the spectra
     block_size = CInt

-
+    #: A unique identifier for the generator, based on its properties. (read-only)
     digest = Property(depends_on=['sample_freq', 'num_samples', 'num_channels', 'num_freqs', 'block_size'])

     def _get_digest(self):
@@ -181,7 +181,7 @@ class TimeOut(SamplesGenerator):
     #: Number of samples in output, as given by :attr:`source`.
     num_samples = Delegate('source')

-
+    #: A unique identifier for the generator, based on its properties. (read-only)
     digest = Property(depends_on=['source.digest'])

     @cached_property
@@ -237,7 +237,7 @@ class SpectraOut(SpectraGenerator):
     #: The size of the block used to calculate the spectra
     block_size = Delegate('source')

-
+    #: A unique identifier for the generator, based on its properties. (read-only)
     digest = Property(depends_on=['source.digest'])

     @cached_property
@@ -285,7 +285,7 @@ class InOut(SamplesGenerator, SpectraGenerator):
     #: Number of samples / snapshots in output, as given by :attr:`source`.
     num_samples = Delegate('source')

-
+    #: A unique identifier for the generator, based on its properties. (read-only)
     digest = Property(depends_on=['source.digest'])

     @cached_property
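The base.py hunks only add `#: A unique identifier ...` docstrings, but they document a recurring pattern: `digest` is a read-only Traits `Property` that depends on the listed traits and is recomputed (and re-cached) whenever one of them changes. A minimal, self-contained sketch of that mechanism follows; the class and the string-concatenation digest are illustrative only, not Acoular's actual `_get_digest` implementation:

```python
# Sketch of the Traits digest pattern documented by the hunks above (not Acoular code).
from traits.api import CInt, Float, HasStrictTraits, Property, cached_property


class DigestExample(HasStrictTraits):
    sample_freq = Float(1.0)
    num_samples = CInt()
    num_channels = CInt()

    # read-only identifier, invalidated and recomputed when any dependency changes
    digest = Property(depends_on=['sample_freq', 'num_samples', 'num_channels'])

    @cached_property
    def _get_digest(self):
        # placeholder derivation; Acoular builds its digest from an internal helper
        return f'{self.sample_freq}_{self.num_samples}_{self.num_channels}'


g = DigestExample(sample_freq=48000.0, num_samples=1024, num_channels=64)
print(g.digest)          # computed lazily on first access
g.num_channels = 32
print(g.digest)          # automatically reflects the changed trait
```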
{acoular-25.10 → acoular-26.1}/acoular/calib.py

@@ -78,26 +78,26 @@ class Calib(InOut):
     """

     #: Name of the .xml file to be imported.
-    file = Union(None, File(filter=['*.xml'], exists=True)
+    file = Union(None, File(filter=['*.xml'], exists=True))

     #: Number of microphones in the calibration data,
     #: is set automatically when read from file or when data is set.
-    num_mics = CInt(0
+    num_mics = CInt(0)

     #: Array of calibration factors,
     #: is set automatically when read from file.
     #: Can be set manually by specifying a NumPy array with shape (num_channels, ) if
     #: :attr:`source` yields time domain signals. For frequency domain signals, the expected
     #: shape is (num_channels * num_freqs).
-    data = CArray(
+    data = CArray()

     #: Channels that are to be treated as invalid.
-    invalid_channels = List(int
+    invalid_channels = List(int)

     #: Channel mask to serve as an index for all valid channels, is set automatically.
-    channels = Property(depends_on=['invalid_channels', 'num_mics']
+    channels = Property(depends_on=['invalid_channels', 'num_mics'])

-
+    #: A unique identifier for the object, based on its properties. (read-only)
     digest = Property(depends_on=['source.digest', 'data'])

     @observe('data')