acoular 25.7.tar.gz → 26.1.tar.gz

Files changed (60)
  1. {acoular-25.7 → acoular-26.1}/PKG-INFO +54 -105
  2. acoular-26.1/README.md +129 -0
  3. {acoular-25.7 → acoular-26.1}/acoular/aiaa/aiaa.py +8 -10
  4. {acoular-25.7 → acoular-26.1}/acoular/base.py +13 -16
  5. {acoular-25.7 → acoular-26.1}/acoular/calib.py +25 -24
  6. {acoular-25.7 → acoular-26.1}/acoular/configuration.py +2 -2
  7. acoular-25.7/acoular/demo/acoular_demo.py → acoular-26.1/acoular/demo/__init__.py +107 -135
  8. acoular-26.1/acoular/demo/__main__.py +37 -0
  9. {acoular-25.7 → acoular-26.1}/acoular/environments.py +119 -130
  10. {acoular-25.7 → acoular-26.1}/acoular/fbeamform.py +438 -440
  11. {acoular-25.7 → acoular-26.1}/acoular/fprocess.py +18 -13
  12. {acoular-25.7 → acoular-26.1}/acoular/grids.py +122 -301
  13. {acoular-25.7 → acoular-26.1}/acoular/h5cache.py +5 -1
  14. {acoular-25.7 → acoular-26.1}/acoular/h5files.py +96 -9
  15. {acoular-25.7 → acoular-26.1}/acoular/microphones.py +30 -35
  16. {acoular-25.7 → acoular-26.1}/acoular/process.py +14 -25
  17. {acoular-25.7 → acoular-26.1}/acoular/sdinput.py +9 -14
  18. {acoular-25.7 → acoular-26.1}/acoular/signals.py +36 -34
  19. {acoular-25.7 → acoular-26.1}/acoular/sources.py +263 -380
  20. {acoular-25.7 → acoular-26.1}/acoular/spectra.py +60 -80
  21. {acoular-25.7 → acoular-26.1}/acoular/tbeamform.py +242 -224
  22. {acoular-25.7 → acoular-26.1}/acoular/tools/helpers.py +25 -33
  23. {acoular-25.7 → acoular-26.1}/acoular/tools/metrics.py +5 -10
  24. acoular-26.1/acoular/tools/utils.py +283 -0
  25. {acoular-25.7 → acoular-26.1}/acoular/tprocess.py +248 -271
  26. {acoular-25.7 → acoular-26.1}/acoular/trajectory.py +5 -6
  27. {acoular-25.7 → acoular-26.1}/acoular/version.py +2 -2
  28. {acoular-25.7 → acoular-26.1}/pyproject.toml +36 -56
  29. acoular-25.7/README.md +0 -133
  30. acoular-25.7/acoular/demo/__init__.py +0 -19
  31. acoular-25.7/acoular/tools/utils.py +0 -115
  32. {acoular-25.7 → acoular-26.1}/.gitignore +0 -0
  33. {acoular-25.7 → acoular-26.1}/AUTHORS.rst +0 -0
  34. {acoular-25.7 → acoular-26.1}/LICENSE +0 -0
  35. {acoular-25.7 → acoular-26.1}/acoular/__init__.py +0 -0
  36. {acoular-25.7 → acoular-26.1}/acoular/aiaa/__init__.py +0 -0
  37. {acoular-25.7 → acoular-26.1}/acoular/deprecation.py +0 -0
  38. {acoular-25.7 → acoular-26.1}/acoular/fastFuncs.py +0 -0
  39. {acoular-25.7 → acoular-26.1}/acoular/internal.py +0 -0
  40. {acoular-25.7 → acoular-26.1}/acoular/tfastfuncs.py +0 -0
  41. {acoular-25.7 → acoular-26.1}/acoular/tools/__init__.py +0 -0
  42. {acoular-25.7 → acoular-26.1}/acoular/traitsviews.py +0 -0
  43. {acoular-25.7 → acoular-26.1}/acoular/xml/HW90D240_f10.xml +0 -0
  44. {acoular-25.7 → acoular-26.1}/acoular/xml/W90_D105_f10.xml +0 -0
  45. {acoular-25.7 → acoular-26.1}/acoular/xml/acousticam_2c.xml +0 -0
  46. {acoular-25.7 → acoular-26.1}/acoular/xml/acousticam_4c.xml +0 -0
  47. {acoular-25.7 → acoular-26.1}/acoular/xml/array38.xml +0 -0
  48. {acoular-25.7 → acoular-26.1}/acoular/xml/array92x.xml +0 -0
  49. {acoular-25.7 → acoular-26.1}/acoular/xml/array_56.xml +0 -0
  50. {acoular-25.7 → acoular-26.1}/acoular/xml/array_56_10_9.xml +0 -0
  51. {acoular-25.7 → acoular-26.1}/acoular/xml/array_56_bomb.xml +0 -0
  52. {acoular-25.7 → acoular-26.1}/acoular/xml/array_56_v2.xml +0 -0
  53. {acoular-25.7 → acoular-26.1}/acoular/xml/array_64.xml +0 -0
  54. {acoular-25.7 → acoular-26.1}/acoular/xml/array_84_10_9.xml +0 -0
  55. {acoular-25.7 → acoular-26.1}/acoular/xml/array_84_bomb_v3.xml +0 -0
  56. {acoular-25.7 → acoular-26.1}/acoular/xml/calib_vw_ring32.xml +0 -0
  57. {acoular-25.7 → acoular-26.1}/acoular/xml/gfai_ring32.xml +0 -0
  58. {acoular-25.7 → acoular-26.1}/acoular/xml/minidsp_uma-16.xml +0 -0
  59. {acoular-25.7 → acoular-26.1}/acoular/xml/minidsp_uma-16_mirrored.xml +0 -0
  60. {acoular-25.7 → acoular-26.1}/acoular/xml/tub_vogel64.xml +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: acoular
- Version: 25.7
+ Version: 26.1
  Summary: Python library for acoustic beamforming
  Project-URL: homepage, https://acoular.org
  Project-URL: documentation, https://acoular.org
@@ -51,61 +51,14 @@ Requires-Python: <3.14,>=3.10
  Requires-Dist: numba
  Requires-Dist: numpy
  Requires-Dist: scikit-learn
- Requires-Dist: scipy!=1.16.*,>=1.1.0
+ Requires-Dist: scipy!=1.16.0,>=1.15; python_version == '3.10'
+ Requires-Dist: scipy>=1.16.1; python_version > '3.10'
  Requires-Dist: tables
  Requires-Dist: traits>=6.0
- Provides-Extra: dev
- Requires-Dist: graphviz; extra == 'dev'
- Requires-Dist: h5py; extra == 'dev'
- Requires-Dist: hatch; extra == 'dev'
- Requires-Dist: ipython; extra == 'dev'
- Requires-Dist: matplotlib; extra == 'dev'
- Requires-Dist: numpydoc; extra == 'dev'
- Requires-Dist: pickleshare; extra == 'dev'
- Requires-Dist: pylops; extra == 'dev'
- Requires-Dist: pytest; extra == 'dev'
- Requires-Dist: pytest-cases; extra == 'dev'
- Requires-Dist: pytest-cov; extra == 'dev'
- Requires-Dist: pytest-env; extra == 'dev'
- Requires-Dist: pytest-mock; extra == 'dev'
- Requires-Dist: pytest-profiling; extra == 'dev'
- Requires-Dist: pytest-regtest; extra == 'dev'
- Requires-Dist: pyyaml; extra == 'dev'
- Requires-Dist: ruff==0.8.1; extra == 'dev'
- Requires-Dist: setuptools; extra == 'dev'
- Requires-Dist: sounddevice; extra == 'dev'
- Requires-Dist: sphinx; extra == 'dev'
- Requires-Dist: sphinx-gallery; extra == 'dev'
- Requires-Dist: sphinxcontrib-bibtex; extra == 'dev'
- Requires-Dist: traitsui; extra == 'dev'
- Provides-Extra: docs
- Requires-Dist: graphviz; extra == 'docs'
- Requires-Dist: ipython; extra == 'docs'
- Requires-Dist: matplotlib; extra == 'docs'
- Requires-Dist: numpydoc; extra == 'docs'
- Requires-Dist: pickleshare; extra == 'docs'
- Requires-Dist: setuptools; extra == 'docs'
- Requires-Dist: sounddevice; extra == 'docs'
- Requires-Dist: sphinx; extra == 'docs'
- Requires-Dist: sphinx-gallery; extra == 'docs'
- Requires-Dist: sphinxcontrib-bibtex; extra == 'docs'
  Provides-Extra: full
  Requires-Dist: matplotlib; extra == 'full'
  Requires-Dist: pylops; extra == 'full'
  Requires-Dist: sounddevice; extra == 'full'
- Provides-Extra: tests
- Requires-Dist: h5py; extra == 'tests'
- Requires-Dist: pylops; extra == 'tests'
- Requires-Dist: pytest; extra == 'tests'
- Requires-Dist: pytest-cases; extra == 'tests'
- Requires-Dist: pytest-cov; extra == 'tests'
- Requires-Dist: pytest-env; extra == 'tests'
- Requires-Dist: pytest-mock; extra == 'tests'
- Requires-Dist: pytest-profiling; extra == 'tests'
- Requires-Dist: pytest-regtest; extra == 'tests'
- Requires-Dist: pyyaml; extra == 'tests'
- Requires-Dist: sounddevice; extra == 'tests'
- Requires-Dist: traitsui; extra == 'tests'
  Description-Content-Type: text/markdown

  ![Acoular Logo](https://github.com/acoular/acoular/blob/master/docs/source/_static/Acoular_logo.png?raw=true)
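The scipy requirement above is now split by Python version using environment markers in the `Requires-Dist` lines. To check whether an installed scipy satisfies the new constraints, a minimal sketch with the third-party `packaging` library (not an acoular dependency; the version string below is illustrative):

```python
from packaging.markers import Marker
from packaging.specifiers import SpecifierSet

# the two conditional requirements from the 26.1 metadata
rules = [
    ("python_version == '3.10'", SpecifierSet("!=1.16.0,>=1.15")),
    ("python_version > '3.10'", SpecifierSet(">=1.16.1")),
]

scipy_version = "1.16.1"  # illustrative installed version

for marker, spec in rules:
    if Marker(marker).evaluate():          # does this rule apply to the running interpreter?
        ok = spec.contains(scipy_version)  # does the installed version satisfy it?
        print(marker, spec, "->", "ok" if ok else "conflict")
```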
@@ -113,45 +66,57 @@ Description-Content-Type: text/markdown
  [![PyPI](https://img.shields.io/pypi/pyversions/acoular.svg)](https://pypi.org/project/acoular)
  [![PyPI](https://img.shields.io/pypi/v/acoular.svg)](https://pypi.org/project/acoular)
  [![Actions status](https://github.com/acoular/acoular/actions/workflows/tests.yml/badge.svg)](https://github.com/acoular/acoular/actions)
- [![DOI](https://zenodo.org/badge/29729101.svg)](https://zenodo.org/doi/10.5281/zenodo.3690794)
+ [![DOI](https://zenodo.org/badge/DOI/10.5281/3690794.svg)](https://zenodo.org/doi/10.5281/zenodo.3690794)

  # Acoular
- Acoular is a Python module for acoustic beamforming that is distributed under the new BSD license.
-
- It is aimed at applications in acoustic testing. Multichannel data recorded by a microphone array can be processed and analyzed in order to generate mappings of sound source distributions. The maps (acoustic photographs) can then be used to locate sources of interest and to characterize them using their spectra.
-
- # Features
- - frequency domain beamforming algorithms: delay & sum, Capon (adaptive), MUSIC, functional beamforming, eigenvalue beamforming
- - frequency domain deconvolution algorithms: DAMAS, DAMAS+, Clean, CleanSC, orthogonal deconvolution
- - frequency domain inverse methods: CMF (covariance matrix fitting), general inverse beamforming, SODIX
- - time domain methods: delay & sum beamforming, CleanT deconvolution
- - time domain methods applicable for moving sources with arbitrary trajectory (linear, circular, arbitrarily 3D curved),
- - frequency domain methods for rotating sources via virtual array rotation for arbitrary arrays and with different interpolation techniques
+ Acoular is a Python module for acoustic beamforming that is distributed under the [BSD 3-clause license](LICENSE).
+
+ It is aimed at (but not limited to) applications in acoustic testing. Multichannel data recorded by microphone arrays can be processed and analyzed to generate mappings of sound source distributions. The maps (acoustic photographs) can then be used to locate sources of interest and to characterize them using their spectra.
+
+ 👁️📢 Please consider taking the [**Acoular User Survey**](https://www.soscisurvey.de/acoularsurvey). It only takes 2 minutes.
+
+ - **Website:** https://acoular.org
+ - **Blog:** https://blog.acoular.org
+ - **Installation:** https://acoular.org/install
+ - **Getting Started** https://acoular.org/user_guide/get_started.html
+ - **User Guide:** https://acoular.org/user_guide
+ - **API Reference:** https://acoular.org/api_ref
+ - **Examples:** https://acoular.org/auto_examples
+ - **Contributing:** https://acoular.org/contributing
+ - **Questions?:** https://github.com/orgs/acoular/discussions
+ - **Bug Reports:** https://github.com/acoular/acoular/issues
+ - **Report a Security Vulnerability:** https://github.com/acoular/acoular/security/advisories/new
+
+ ## Highlights
+ - frequency domain methods:
+ - **beamforming:** delay & sum, Capon (adaptive), MUSIC, functional and eigenvalue beamforming
+ - **deconvolution:** DAMAS, DAMAS+, Clean, CleanSC, (gridless) orthogonal deconvolution
+ - **inverse methods:** CMF (covariance matrix fitting), general inverse beamforming, SODIX
+ - time domain methods:
+ - **beamforming:** delay & sum
+ - **deconvolution:** CleanT
  - 1D, 2D and 3D mapping grids for all methods
- - gridless option for orthogonal deconvolution
- - four different built-in steering vector formulations
- - arbitrary stationary background flow can be considered for all methods
- - efficient cross spectral matrix computation
- - flexible modular time domain processing: n-th octave band filters, fast, slow, and impulse weighting, A-, C-, and Z-weighting, filter bank, zero delay filters
- - time domain simulation of array microphone signals from fixed and arbitrarily moving sources in arbitrary flow
- - fully object-oriented interface
- - lazy evaluation: while processing blocks are set up at any time, (expensive) computations are only performed when needed
- - intelligent and transparent caching: computed results are automatically saved and loaded on the next run to avoid unnecessary re-computation
- - parallel (multithreaded) implementation with Numba for most algorithms
- - easily extendable with new algorithms
-
- # License
- Acoular is licensed under the BSD 3-clause. See [LICENSE](LICENSE)
-
- # Citing
-
+ - arbitrary stationary background 🌬️ **flow** can be considered for all methods
+ - frequency domain methods for 🌀 **rotating sources** via virtual array rotation for arbitrary arrays
+ - all time domain methods can identify 🚂🛩️ **moving sources** with arbitrary trajectory
+ - flexible & modular 🧮 **signal processing**:
+ - n-th octave band filters
+ - fast, slow, and impulse weighting
+ - A-, C-, and Z-weighting
+ - filter bank
+ - linear phase filters
+ - intelligent and transparent :floppy_disk: **caching**: computed results are automatically saved and loaded on the next run to avoid unnecessary re-computation.
+ - 🦥 **lazy** evaluation: while processing blocks are set up at any time, (expensive) computations are only performed when needed.
+ - 🏎️ **efficient & parallel** (multithreaded) computation with [Numba](https://numba.pydata.org) for most algorithms.
+
+ ## Citing
  If you use Acoular for academic work, please consider citing both our
  [publication](https://doi.org/10.1016/j.apacoust.2016.09.015):

  Sarradj, E., & Herold, G. (2017).
  A Python framework for microphone array data processing.
  Applied Acoustics, 116, 50–58.
- https://doi.org/10.1016/j.apacoust.2016.09
+ https://doi.org/10.1016/j.apacoust.2016.09.015

  and our [software](https://zenodo.org/doi/10.5281/zenodo.3690794):

@@ -159,41 +124,26 @@ and our [software](https://zenodo.org/doi/10.5281/zenodo.3690794):
  Acoular – Acoustic testing and source mapping software.
  Zenodo. https://zenodo.org/doi/10.5281/zenodo.3690794

- # Dependencies
- Acoular runs under Linux, Windows and MacOS and needs Numpy, Scipy, Traits, scikit-learn, pytables, Numba packages available.
- Matplotlib is needed for some of the examples.
-
- If you want to use input from a soundcard, you will also need to install the [sounddevice](https://python-sounddevice.readthedocs.io/en/0.3.12/installation.html) package. Some solvers for the CMF method need [Pylops](https://pylops.readthedocs.io/en/stable/installation.html).
+ ## Installation

- # Installation
+ Acoular can be installed from [PyPI](https://pypi.org/project/acoular). It is recommended to use a [virtual environment](https://docs.python.org/3/library/venv.html). Inside the environment, run

- Acoular can be installed via [conda](https://docs.conda.io/en/latest/), which is also part of the [Anaconda Python distribution](https://www.anaconda.com/). It is recommended to install into a dedicated [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). After activating this environment, run
+ pip install acoular
+
+ A second option is to install Acoular with [conda](https://docs.conda.io/en/latest/). It is recommended to install into a dedicated [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). After activating the environment, run

  conda install -c acoular acoular

- This will install Acoular in your Anaconda Python environment and make the Acoular library available from Python. In addition, this will install all dependencies (those other packages mentioned above) if they are not already present on your system.
-
- A second option is to install Acoular via [pip](https://pip.pypa.io/en/stable/). It is recommended to use a dedicated [virtual environment](https://virtualenv.pypa.io/en/latest/) and then run
-
- pip install acoular
-
  For more detailed installation instructions, see the [documentation](https://acoular.org/install/index.html).

- # Documentation and help
+ ## Documentation and help
  Documentation is available [here](https://acoular.org) with a
- [getting started](https://acoular.org/get_started/index.html) section and
+ [getting started](https://www.acoular.org/user_guide/get_started.html) section and
  [examples](https://acoular.org/auto_examples/index.html).

- The Acoular [blog](https://acoular.github.io/blog/) contains some tutorials.
-
  If you discover problems with the Acoular software, please report them using the [issue tracker](https://github.com/acoular/acoular/issues) on GitHub. Please use the [Acoular discussions forum](https://github.com/acoular/acoular/discussions) for practical questions, discussions, and demos.

- # Contributing
-
- We are always happy to welcome new contributors to the project.
- If you are interested in contributing, have a look at the [CONTRIBUTING.md](CONTRIBUTING.md) file.
-
- # Example
+ ## Example
  This reads data from 64 microphone channels and computes a beamforming map for the 8kHz third octave band:

  ```python
@@ -238,6 +188,5 @@ plt.savefig('three_sources.png', dpi=300, bbox_inches='tight')
  plt.show()
  ```

- ![result](https://github.com/acoular/acoular/blob/master/docs/source/get_started/three_source_py3_colormap.png?raw=true)
-
+ ![result](https://github.com/acoular/acoular/blob/master/docs/source/user_guide/three_source_py3_colormap.png?raw=true)

acoular-26.1/README.md ADDED
@@ -0,0 +1,129 @@
+ ![Acoular Logo](https://github.com/acoular/acoular/blob/master/docs/source/_static/Acoular_logo.png?raw=true)
+
+ [![PyPI](https://img.shields.io/pypi/pyversions/acoular.svg)](https://pypi.org/project/acoular)
+ [![PyPI](https://img.shields.io/pypi/v/acoular.svg)](https://pypi.org/project/acoular)
+ [![Actions status](https://github.com/acoular/acoular/actions/workflows/tests.yml/badge.svg)](https://github.com/acoular/acoular/actions)
+ [![DOI](https://zenodo.org/badge/DOI/10.5281/3690794.svg)](https://zenodo.org/doi/10.5281/zenodo.3690794)
+
+ # Acoular
+ Acoular is a Python module for acoustic beamforming that is distributed under the [BSD 3-clause license](LICENSE).
+
+ It is aimed at (but not limited to) applications in acoustic testing. Multichannel data recorded by microphone arrays can be processed and analyzed to generate mappings of sound source distributions. The maps (acoustic photographs) can then be used to locate sources of interest and to characterize them using their spectra.
+
+ 👁️📢 Please consider taking the [**Acoular User Survey**](https://www.soscisurvey.de/acoularsurvey). It only takes 2 minutes.
+
+ - **Website:** https://acoular.org
+ - **Blog:** https://blog.acoular.org
+ - **Installation:** https://acoular.org/install
+ - **Getting Started** https://acoular.org/user_guide/get_started.html
+ - **User Guide:** https://acoular.org/user_guide
+ - **API Reference:** https://acoular.org/api_ref
+ - **Examples:** https://acoular.org/auto_examples
+ - **Contributing:** https://acoular.org/contributing
+ - **Questions?:** https://github.com/orgs/acoular/discussions
+ - **Bug Reports:** https://github.com/acoular/acoular/issues
+ - **Report a Security Vulnerability:** https://github.com/acoular/acoular/security/advisories/new
+
+ ## Highlights
+ - frequency domain methods:
+ - **beamforming:** delay & sum, Capon (adaptive), MUSIC, functional and eigenvalue beamforming
+ - **deconvolution:** DAMAS, DAMAS+, Clean, CleanSC, (gridless) orthogonal deconvolution
+ - **inverse methods:** CMF (covariance matrix fitting), general inverse beamforming, SODIX
+ - time domain methods:
+ - **beamforming:** delay & sum
+ - **deconvolution:** CleanT
+ - 1D, 2D and 3D mapping grids for all methods
+ - arbitrary stationary background 🌬️ **flow** can be considered for all methods
+ - frequency domain methods for 🌀 **rotating sources** via virtual array rotation for arbitrary arrays
+ - all time domain methods can identify 🚂🛩️ **moving sources** with arbitrary trajectory
+ - flexible & modular 🧮 **signal processing**:
+ - n-th octave band filters
+ - fast, slow, and impulse weighting
+ - A-, C-, and Z-weighting
+ - filter bank
+ - linear phase filters
+ - intelligent and transparent :floppy_disk: **caching**: computed results are automatically saved and loaded on the next run to avoid unnecessary re-computation.
+ - 🦥 **lazy** evaluation: while processing blocks are set up at any time, (expensive) computations are only performed when needed.
+ - 🏎️ **efficient & parallel** (multithreaded) computation with [Numba](https://numba.pydata.org) for most algorithms.
+
+ ## Citing
+ If you use Acoular for academic work, please consider citing both our
+ [publication](https://doi.org/10.1016/j.apacoust.2016.09.015):
+
+ Sarradj, E., & Herold, G. (2017).
+ A Python framework for microphone array data processing.
+ Applied Acoustics, 116, 50–58.
+ https://doi.org/10.1016/j.apacoust.2016.09.015
+
+ and our [software](https://zenodo.org/doi/10.5281/zenodo.3690794):
+
+ Sarradj, E., Herold, G., Kujawski, A., Jekosch, S., Pelling, A. J. R., Czuchaj, M., Gensch, T., & Oertwig, S..
+ Acoular – Acoustic testing and source mapping software.
+ Zenodo. https://zenodo.org/doi/10.5281/zenodo.3690794
+
+ ## Installation
+
+ Acoular can be installed from [PyPI](https://pypi.org/project/acoular). It is recommended to use a [virtual environment](https://docs.python.org/3/library/venv.html). Inside the environment, run
+
+ pip install acoular
+
+ A second option is to install Acoular with [conda](https://docs.conda.io/en/latest/). It is recommended to install into a dedicated [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). After activating the environment, run
+
+ conda install -c acoular acoular
+
+ For more detailed installation instructions, see the [documentation](https://acoular.org/install/index.html).
+
+ ## Documentation and help
+ Documentation is available [here](https://acoular.org) with a
+ [getting started](https://www.acoular.org/user_guide/get_started.html) section and
+ [examples](https://acoular.org/auto_examples/index.html).
+
+ If you discover problems with the Acoular software, please report them using the [issue tracker](https://github.com/acoular/acoular/issues) on GitHub. Please use the [Acoular discussions forum](https://github.com/acoular/acoular/discussions) for practical questions, discussions, and demos.
+
+ ## Example
+ This reads data from 64 microphone channels and computes a beamforming map for the 8kHz third octave band:
+
+ ```python
+ from pathlib import Path
+
+ import acoular as ac
+ import matplotlib.pylab as plt
+
+ # this file contains the microphone coordinates
+ micgeofile = Path(ac.__file__).parent / 'xml' / 'array_64.xml'
+ # set up object managing the microphone coordinates
+ mg = ac.MicGeom( file=micgeofile )
+ # generate test data, in real life this would come from an array measurement
+ p = ac.demo.create_three_sources(mg, h5savefile='three_sources.h5')
+ # set up object managing the microphone array data (usually from measurement)
+ ts = ac.TimeSamples( file='three_sources.h5')
+ # set up object managing the cross spectral matrix computation
+ ps = ac.PowerSpectra( source=ts, block_size=128, window='Hanning' )
+ # alternatively, you can use the in-memory Mixer object directly:
+ # ps = ac.PowerSpectra( source=p, block_size=128, window='Hanning' )
+ # set up object managing the mapping grid
+ rg = ac.RectGrid( x_min=-0.2, x_max=0.2, y_min=-0.2, y_max=0.2, z=-0.3, \
+ increment=0.01 )
+ # set up steering vector, implicitely contains also the standard quiescent
+ # environment with standard speed of sound
+ st = ac.SteeringVector( grid = rg, mics=mg )
+ # set up the object managing the delay & sum beamformer
+ bb = ac.BeamformerBase( freq_data=ps, steer=st )
+ # request the result in the 8kHz third octave band from approriate FFT-Lines
+ # this starts the actual computation (data intake, FFT, Welch CSM, beamforming)
+ pm = bb.synthetic( 8000, 3 )
+ # compute the sound pressure level
+ Lm = ac.L_p( pm )
+ # plot the map
+ plt.imshow( Lm.T, origin='lower', vmin=Lm.max()-10, extent=rg.extent, \
+ interpolation='bicubic')
+ plt.title('Beamformer (base) for 3 sources measured for 8000 Hz')
+ plt.xlabel('x in m')
+ plt.ylabel('y in m')
+ plt.colorbar(label=r'$L_p$')
+ plt.savefig('three_sources.png', dpi=300, bbox_inches='tight')
+ plt.show()
+ ```
+
+ ![result](https://github.com/acoular/acoular/blob/master/docs/source/user_guide/three_source_py3_colormap.png?raw=true)
+
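The example in the new README ends with a map of sound pressure levels `Lm` on the grid `rg`. A minimal follow-up sketch for reading the peak position out of that map with plain NumPy (assuming `Lm` has the grid's (nx, ny) shape, as returned by `bb.synthetic` for a `RectGrid`; the variable names are those of the example):

```python
import numpy as np

# index of the loudest grid point in the (nx, ny) beamforming map
ix, iy = np.unravel_index(Lm.argmax(), Lm.shape)
# convert the indices to x/y coordinates using the grid origin and increment
x_peak = rg.x_min + ix * rg.increment
y_peak = rg.y_min + iy * rg.increment
print(f'peak level {Lm.max():.1f} dB at x={x_peak:.2f} m, y={y_peak:.2f} m')
```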
{acoular-25.7 → acoular-26.1}/acoular/aiaa/aiaa.py
@@ -27,18 +27,17 @@ Examples

  import contextlib

- from numpy import array
+ import numpy as np
  from traits.api import (
  File,
  Instance,
  Property,
  Union,
  cached_property,
- on_trait_change,
+ observe,
  property_depends_on,
  )

- from acoular.deprecation import deprecated_alias
  from acoular.h5files import H5FileBase, _get_h5file_class
  from acoular.internal import digest
  from acoular.microphones import MicGeom
@@ -85,7 +84,6 @@ class TriggerAIAABenchmark(TimeSamplesAIAABenchmark):
  (self.num_samples, self.num_channels) = self.data.shape


- @deprecated_alias({'name': 'file'}, removal_version='25.10')
  class CsmAIAABenchmark(PowerSpectraImport):
  """Class to load the CSM that is stored in AIAA Benchmark HDF5 file."""

@@ -104,7 +102,7 @@ class CsmAIAABenchmark(PowerSpectraImport):
  #: HDF5 file object
  h5f = Instance(H5FileBase, transient=True)

- # internal identifier
+ #: A unique identifier for the CSM importer, based on its properties. (read-only)
  digest = Property(depends_on=['basename', '_csmsum'])

  @cached_property
@@ -115,8 +113,8 @@
  def _get_basename(self):
  return get_file_basename(self.file)

- @on_trait_change('basename')
- def load_data(self):
+ @observe('basename')
+ def _load_data(self, event): # noqa ARG002
  """Open the .h5 file and set attributes."""
  if self.h5f is not None:
  with contextlib.suppress(OSError):
@@ -156,7 +154,7 @@
  ndarray
  Array of length *block_size/2+1* containing the sample frequencies.
  """
- return array(self.h5f.get_data_by_reference('/CsmData/binCenterFrequenciesHz')[:].flatten(), dtype=float)
+ return np.array(self.h5f.get_data_by_reference('/CsmData/binCenterFrequenciesHz')[:].flatten(), dtype=float)


  class MicAIAABenchmark(MicGeom):
@@ -172,8 +170,8 @@
  None, File(filter=['*.h5'], exists=True), desc='name of the h5 file containing the microphone geometry'
  )

- @on_trait_change('file')
- def _import_mpos(self):
+ @observe('file')
+ def _import_mpos(self, event): # noqa ARG002
  """
  Import the microphone positions from .h5 file.
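The listeners in this file move from the older `@on_trait_change` decorator to traits' `@observe` API, whose handlers take a single event argument instead of relying on the trait value being passed in. A minimal standalone sketch of the pattern (the class and trait names are illustrative, not from acoular):

```python
from traits.api import HasStrictTraits, Str, observe


class FileReader(HasStrictTraits):
    # hypothetical trait; changing it triggers the handler below
    file = Str()

    @observe('file')
    def _load_data(self, event):
        # event carries the notification details (name, old, new, object)
        print(f'{event.name} changed from {event.old!r} to {event.new!r}')


reader = FileReader()
reader.file = 'example.h5'  # prints: file changed from '' to 'example.h5'
```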
 
{acoular-25.7 → acoular-26.1}/acoular/base.py
@@ -7,6 +7,12 @@ The classes in this module are abstract base classes that provide a common inter
  that generate an output via the generator :meth:`result` in block-wise manner. They are not intended
  to be used directly, but to be subclassed by classes that implement the actual signal processing.

+ .. inheritance-diagram::
+ acoular.base
+ :top-classes:
+ acoular.base.Generator
+ :parts: 1
+
  .. autosummary::
  :toctree: generated/

@@ -32,11 +38,9 @@ from traits.api import (
  )

  # acoular imports
- from .deprecation import deprecated_alias
  from .internal import digest


- @deprecated_alias({'numchannels': 'num_channels', 'numsamples': 'num_samples'}, removal_version='25.10')
  class Generator(ABCHasStrictTraits):
  """Interface for any generating signal processing block.

@@ -51,7 +55,7 @@ class Generator(ABCHasStrictTraits):
  """

  #: Sampling frequency of the signal, defaults to 1.0
- sample_freq = Float(1.0, desc='sampling frequency')
+ sample_freq = Float(1.0)

  #: Number of signal samples
  num_samples = CInt
@@ -59,7 +63,7 @@
  #: Number of channels
  num_channels = CInt

- # internal identifier
+ #: A unique identifier for the generator, based on its properties. (read-only)
  digest = Property(depends_on=['sample_freq', 'num_samples', 'num_channels'])

  def _get_digest(self):
@@ -92,7 +96,7 @@ class SamplesGenerator(Generator):

  """

- # internal identifier
+ #: A unique identifier for the generator, based on its properties. (read-only)
  digest = Property(depends_on=['sample_freq', 'num_samples', 'num_channels'])

  def _get_digest(self):
@@ -133,7 +137,7 @@ class SpectraGenerator(Generator):
  #: The length of the block used to calculate the spectra
  block_size = CInt

- # internal identifier
+ #: A unique identifier for the generator, based on its properties. (read-only)
  digest = Property(depends_on=['sample_freq', 'num_samples', 'num_channels', 'num_freqs', 'block_size'])

  def _get_digest(self):
@@ -156,7 +160,6 @@
  """


- @deprecated_alias({'numchannels': 'num_channels', 'numsamples': 'num_samples'}, read_only=True, removal_version='25.10')
  class TimeOut(SamplesGenerator):
  """
  Abstract base class receiving from a :attr:`source` and returning time domain signals.
@@ -178,7 +181,7 @@
  #: Number of samples in output, as given by :attr:`source`.
  num_samples = Delegate('source')

- # internal identifier
+ #: A unique identifier for the generator, based on its properties. (read-only)
  digest = Property(depends_on=['source.digest'])

  @cached_property
@@ -204,11 +207,6 @@
  """


- @deprecated_alias(
- {'numchannels': 'num_channels', 'numsamples': 'num_samples', 'numfreqs': 'num_freqs'},
- read_only=True,
- removal_version='25.10',
- )
  class SpectraOut(SpectraGenerator):
  """
  Abstract base class receiving from a :attr:`source` and returning frequency domain signals.
@@ -239,7 +237,7 @@ class SpectraOut(SpectraGenerator):
  #: The size of the block used to calculate the spectra
  block_size = Delegate('source')

- # internal identifier
+ #: A unique identifier for the generator, based on its properties. (read-only)
  digest = Property(depends_on=['source.digest'])

  @cached_property
@@ -263,7 +261,6 @@
  """


- @deprecated_alias({'numchannels': 'num_channels', 'numsamples': 'num_samples'}, read_only=True, removal_version='25.10')
  class InOut(SamplesGenerator, SpectraGenerator):
  """
  Abstract base class receiving from a :attr:`source` and returning signals in the same domain.
@@ -288,7 +285,7 @@ class InOut(SamplesGenerator, SpectraGenerator):
  #: Number of samples / snapshots in output, as given by :attr:`source`.
  num_samples = Delegate('source')

- # internal identifier
+ #: A unique identifier for the generator, based on its properties. (read-only)
  digest = Property(depends_on=['source.digest'])

  @cached_property
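The `digest = Property(depends_on=[...])` lines that recur above are the traits idiom behind these cached, dependency-tracked identifiers: the value is computed by a `_get_digest` method on first access and invalidated whenever a listed trait changes. A minimal self-contained sketch of that idiom (the class name and digest format are illustrative, not acoular's actual implementation):

```python
from traits.api import Float, HasStrictTraits, Property, cached_property


class Block(HasStrictTraits):
    # hypothetical processing parameter
    sample_freq = Float(1.0)

    # recomputed lazily whenever sample_freq changes
    digest = Property(depends_on=['sample_freq'])

    @cached_property
    def _get_digest(self):
        return f'Block_sample_freq={self.sample_freq}'


b = Block(sample_freq=48000.0)
print(b.digest)          # computed on first access
b.sample_freq = 44100.0  # invalidates the cached value
print(b.digest)          # recomputed
```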
{acoular-25.7 → acoular-26.1}/acoular/calib.py
@@ -3,6 +3,12 @@
  # ------------------------------------------------------------------------------
  """Implements calibration of multichannel time signals.

+ .. inheritance-diagram::
+ acoular.calib
+ :top-classes:
+ acoular.base.InOut
+ :parts: 1
+
  .. autosummary::
  :toctree: generated/

@@ -12,19 +18,14 @@
  # imports from other packages
  import xml.dom.minidom

- from numpy import array, newaxis
- from traits.api import CArray, CInt, File, List, Property, Union, cached_property, on_trait_change
-
- import acoular as ac
-
- from .base import InOut
+ import numpy as np
+ from traits.api import CArray, CInt, File, List, Property, Union, cached_property, observe

  # acoular imports
- from .deprecation import deprecated_alias
+ from .base import InOut, SamplesGenerator, SpectraGenerator
  from .internal import digest


- @deprecated_alias({'from_file': 'file'}, removal_version='25.10')
  class Calib(InOut):
  """Processing block for handling calibration data in `*.xml` or NumPy format.

@@ -77,30 +78,30 @@ class Calib(InOut):
  """

  #: Name of the .xml file to be imported.
- file = Union(None, File(filter=['*.xml'], exists=True), desc='name of the xml file to import')
+ file = Union(None, File(filter=['*.xml'], exists=True))

  #: Number of microphones in the calibration data,
  #: is set automatically when read from file or when data is set.
- num_mics = CInt(0, desc='number of microphones in the geometry')
+ num_mics = CInt(0)

  #: Array of calibration factors,
  #: is set automatically when read from file.
  #: Can be set manually by specifying a NumPy array with shape (num_channels, ) if
  #: :attr:`source` yields time domain signals. For frequency domain signals, the expected
  #: shape is (num_channels * num_freqs).
- data = CArray(desc='calibration data')
+ data = CArray()

  #: Channels that are to be treated as invalid.
- invalid_channels = List(int, desc='list of invalid channels')
+ invalid_channels = List(int)

  #: Channel mask to serve as an index for all valid channels, is set automatically.
- channels = Property(depends_on=['invalid_channels', 'num_mics'], desc='channel mask')
+ channels = Property(depends_on=['invalid_channels', 'num_mics'])

- # Internal identifier
+ #: A unique identifier for the object, based on its properties. (read-only)
  digest = Property(depends_on=['source.digest', 'data'])

- @on_trait_change('data')
- def set_num_mics(self):
+ @observe('data')
+ def _update_num_mics(self, event): # noqa ARG002
  """Sets the number of microphones based on the shape of the data array."""
  self.num_mics = self.data.shape[0]

@@ -109,14 +110,14 @@ class Calib(InOut):
  if len(self.invalid_channels) == 0:
  return slice(0, None, None)
  allr = [i for i in range(self.num_mics) if i not in self.invalid_channels]
- return array(allr)
+ return np.array(allr)

  @cached_property
  def _get_digest(self):
  return digest(self)

- @on_trait_change('file')
- def import_data(self):
+ @observe('file')
+ def _import_data(self, event): # noqa ARG002
  """Loads the calibration data from `*.xml` file ."""
  doc = xml.dom.minidom.parse(self.file)
  names = []
@@ -124,7 +125,7 @@
  for element in doc.getElementsByTagName('pos'):
  names.append(element.getAttribute('Name'))
  data.append(float(element.getAttribute('factor')))
- self.data = array(data, 'd')
+ self.data = np.array(data, 'd')
  self.num_mics = self.data.shape[0]

  def __validate_data(self):
@@ -136,13 +137,13 @@
  msg = 'No source data available.'
  raise ValueError(msg)
  tobj = self.source
- while isinstance(tobj, ac.InOut):
+ while isinstance(tobj, InOut):
  tobj = tobj.source
- if isinstance(tobj, ac.SamplesGenerator) and (self.data[self.channels].shape[0] != tobj.num_channels):
+ if isinstance(tobj, SamplesGenerator) and (self.data[self.channels].shape[0] != tobj.num_channels):
  msg = f'calibration data shape {self.data[self.channels].shape[0]} does not match \
  source data shape {tobj.num_channels}'
  raise ValueError(msg)
- if isinstance(tobj, ac.SpectraGenerator) and (
+ if isinstance(tobj, SpectraGenerator) and (
  self.data[self.channels].shape[0] != tobj.num_channels * tobj.num_freqs
  ):
  msg = f'calibration data shape {self.data[self.channels].shape[0]} does not match \
@@ -170,4 +171,4 @@
  """
  self.__validate_data()
  for block in self.source.result(num):
- yield block * self.data[self.channels][newaxis]
+ yield block * self.data[self.channels][np.newaxis]
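As the final hunk shows, `Calib` acts as a regular `InOut` processing block: each block delivered by its `source` is multiplied by the per-channel calibration factors. A minimal usage sketch under assumptions (the file name and factor values are illustrative; the class, trait, and method names are taken from the hunks above):

```python
import numpy as np
import acoular as ac

# time data from an array measurement (hypothetical file)
ts = ac.TimeSamples(file='measurement.h5')

# one calibration factor per channel; here simply 0.5 for every microphone
cal = ac.Calib(source=ts, data=np.full(ts.num_channels, 0.5))

# pull calibrated data block-wise through the processing chain
for block in cal.result(num=128):
    # block has shape (<=128, num_channels), already scaled by the factors
    print(block.shape)
```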