acoular-25.10-py3-none-any.whl → acoular-26.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
acoular/tprocess.py CHANGED
@@ -105,17 +105,17 @@ class MaskedTimeOut(TimeOut):
     source = Instance(SamplesGenerator)
 
     #: The index of the first valid sample. Default is ``0``.
-    start = CInt(0, desc='start of valid samples')
+    start = CInt(0)
 
     #: The index of the last valid sample (exclusive).
     #: If set to :obj:`None`, the selection continues until the end of the available data.
-    stop = Union(None, CInt, desc='stop of valid samples')
+    stop = Union(None, CInt)
 
     #: List of channel indices to be excluded from processing.
-    invalid_channels = List(int, desc='list of invalid channels')
+    invalid_channels = List(int)
 
     #: A mask or index array representing valid channels. (automatically updated)
-    channels = Property(depends_on=['invalid_channels', 'source.num_channels'], desc='channel mask')
+    channels = Property(depends_on=['invalid_channels', 'source.num_channels'])
 
     #: Total number of input channels, including invalid channels, as given by
     #: :attr:`~acoular.base.TimeOut.source`. (read-only).
@@ -125,19 +125,15 @@ class MaskedTimeOut(TimeOut):
     num_samples_total = Delegate('source', 'num_samples')
 
     #: Number of valid input channels after excluding :attr:`invalid_channels`. (read-only)
-    num_channels = Property(
-        depends_on=['invalid_channels', 'source.num_channels'], desc='number of valid input channels'
-    )
+    num_channels = Property(depends_on=['invalid_channels', 'source.num_channels'])
 
     #: Number of valid time-domain samples, based on :attr:`start` and :attr:`stop` indices.
     #: (read-only)
-    num_samples = Property(
-        depends_on=['start', 'stop', 'source.num_samples'], desc='number of valid samples per channel'
-    )
+    num_samples = Property(depends_on=['start', 'stop', 'source.num_samples'])
 
     #: The name of the cache file (without extension). It serves as an internal reference for data
     #: caching and tracking processed files. (automatically generated)
-    basename = Property(depends_on=['source.digest'], desc='basename for cache file')
+    basename = Property(depends_on=['source.digest'])
 
     #: A unique identifier for the object, based on its properties. (read-only)
     digest = Property(depends_on=['source.digest', 'start', 'stop', 'invalid_channels'])
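
The traits above combine as in the following minimal sketch; the HDF5 file name and channel indices are purely illustrative, and the ``file`` trait of ``TimeSamples`` is assumed from recent Acoular releases:

```python
import acoular as ac

# placeholder file name; any SamplesGenerator-derived source works here
source = ac.TimeSamples(file='example_data.h5')

# keep samples 0..47999 and exclude two broken channels; num_channels and
# num_samples then report only the valid selection
masked = ac.MaskedTimeOut(source=source, start=0, stop=48000, invalid_channels=[1, 7])
print(masked.num_channels, masked.num_samples)
```
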
@@ -274,7 +270,7 @@ class ChannelMixer(TimeOut):
     #: If not explicitly set, all channels are weighted equally (delault is ``1``).
     #: The shape of :attr:`weights` must match the :attr:`number of input channels<num_channels>`.
     #: If an incompatible shape is provided, a :obj:`ValueError` will be raised.
-    weights = CArray(desc='channel weights')
+    weights = CArray()
 
     #: The number of output channels, which is always ``1`` for this class since it produces a
     #: single mixed output. (read-only)
@@ -589,7 +585,7 @@ class AngleTracker(MaskedTimeOut):
 
     #: Number of trigger signals per revolution. This allows tracking scenarios where multiple
     #: trigger pulses occur per rotation. Default is ``1``, meaning a single trigger per revolution.
-    trigger_per_revo = Int(1, desc='trigger signals per revolution')
+    trigger_per_revo = Int(1)
 
     #: Rotation direction flag:
     #:
@@ -597,26 +593,26 @@ class AngleTracker(MaskedTimeOut):
     #: - ``-1``: clockwise rotation.
     #:
     #: Default is ``-1``.
-    rot_direction = Int(-1, desc='mathematical direction of rotation')
+    rot_direction = Int(-1)
 
     #: Number of points used for spline interpolation. Default is ``4``.
-    interp_points = Int(4, desc='Points of interpolation used for spline')
+    interp_points = Int(4)
 
     #: Initial rotation angle (in radians) corresponding to the first trigger event. This allows
     #: defining a custom starting reference angle. Default is ``0``.
-    start_angle = Float(0, desc='rotation angle for trigger position')
+    start_angle = Float(0)
 
     #: Revolutions per minute (RPM) computed for each sample.
     #: It is based on the trigger data. (read-only)
-    rpm = Property(depends_on=['digest'], desc='revolutions per minute for each sample')
+    rpm = Property(depends_on=['digest'])
 
     #: Average revolutions per minute over the entire dataset.
     #: It is computed based on the trigger intervals. (read-only)
-    average_rpm = Property(depends_on=['digest'], desc='average revolutions per minute')
+    average_rpm = Property(depends_on=['digest'])
 
     #: Computed rotation angle (in radians) for each sample.
     #: It is interpolated from the trigger data. (read-only)
-    angle = Property(depends_on=['digest'], desc='rotation angle for each sample')
+    angle = Property(depends_on=['digest'])
 
     # Internal flag to determine whether rpm and angle calculation has been processed,
     # prevents recalculation
@@ -741,14 +737,15 @@ class SpatialInterpolator(TimeOut): # pragma: no cover
 
     #: The physical microphone geometry. An instance of :class:`~acoular.microphones.MicGeom` that
     #: defines the positions of the real microphones used for measurement.
-    mics = Instance(MicGeom(), desc='microphone geometry')
+    mics = Instance(MicGeom())
 
     #: The virtual microphone geometry. This property defines the positions
     #: of virtual microphones where interpolated pressure values are computed.
     #: Default is the physical microphone geometry (:attr:`mics`).
-    mics_virtual = Property(desc='microphone geometry')
+    mics_virtual = Property()
 
-    _mics_virtual = Instance(MicGeom, desc='internal microphone geometry;internal usage, read only')
+    #: internal microphone geometry;internal usage, read only
+    _mics_virtual = Instance(MicGeom)
 
     def _get_mics_virtual(self):
         if not self._mics_virtual and self.mics:
@@ -778,7 +775,6 @@ class SpatialInterpolator(TimeOut): # pragma: no cover
         'IDW',
         'custom',
         'sinc',
-        desc='method for interpolation used',
     )
 
     #: Defines the spatial dimensionality of the microphone array.
@@ -790,7 +786,7 @@ class SpatialInterpolator(TimeOut): # pragma: no cover
     #: - ``'ring'``: Circular arrays where rotation needs to be considered.
     #: - ``'3D'``: Three-dimensional microphone distributions.
     #: - ``'custom'``: User-defined microphone arrangements.
-    array_dimension = Enum('1D', '2D', 'ring', '3D', 'custom', desc='spatial dimensionality of the array geometry')
+    array_dimension = Enum('1D', '2D', 'ring', '3D', 'custom')
 
     #: Sampling frequency of the output signal, inherited from the :attr:`source`. This defines the
     #: rate at which microphone pressure samples are acquired and processed.
@@ -825,14 +821,11 @@ class SpatialInterpolator(TimeOut): # pragma: no cover
     #: Number of neighboring microphones used in IDW interpolation. This parameter determines how
     #: many physical microphones contribute to the weighted sum in inverse distance weighting (IDW)
     #: interpolation.
-    num_IDW = Int(3, desc='number of neighboring microphones, DEFAULT=3')  # noqa: N815
+    num_IDW = Int(3)  # noqa: N815
 
     #: Weighting exponent for IDW interpolation. This parameter controls the influence of distance
     #: in inverse distance weighting (IDW). A higher value gives more weight to closer microphones.
-    p_weight = Float(
-        2,
-        desc='used in interpolation for virtual microphone, weighting power exponent for IDW',
-    )
+    p_weight = Float(2)
 
     # Stores the output of :meth:`_virtNewCoord_func`; Read-Only
     _virtNewCoord_func = Property(  # noqa: N815
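
For context on ``num_IDW`` and ``p_weight``: inverse distance weighting forms each virtual-microphone signal as a weighted sum of the nearest physical microphones, with weights proportional to 1/d^p. A standalone sketch of that weighting, not Acoular's internal implementation, using made-up distances:

```python
import numpy as np

def idw_weights(distances, p=2.0, k=3):
    """Normalized inverse-distance weights for the k nearest microphones."""
    d = np.asarray(distances, dtype=float)
    nearest = np.argsort(d)[:k]                   # indices of the k closest mics
    w = 1.0 / np.maximum(d[nearest], 1e-12) ** p  # guard against division by zero
    return nearest, w / w.sum()                   # normalize so the weights sum to one

# assumed distances (in m) from one virtual microphone to five real microphones
idx, w = idw_weights([0.10, 0.25, 0.05, 0.40, 0.30], p=2.0, k=3)
print(idx, w)
```
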
@@ -861,7 +854,7 @@ class SpatialInterpolator(TimeOut): # pragma: no cover
 
     @cached_property
     def _get_virtNewCoord(self):  # noqa N802
-        return self._virtNewCoord_func(self.mics.mpos, self.mics_virtual.mpos, self.method, self.array_dimension)
+        return self._virtNewCoord_func(self.mics.pos, self.mics_virtual.pos, self.method, self.array_dimension)
 
     def sinc_mic(self, r):
         """
@@ -1054,7 +1047,7 @@ class SpatialInterpolator(TimeOut): # pragma: no cover
         # number of time samples
         nTime = p.shape[0]
         # number of virtual mixcs
-        nVirtMics = self.mics_virtual.mpos.shape[1]
+        nVirtMics = self.mics_virtual.pos.shape[1]
         # mesh and projection onto polar Coordinates
         meshList, virtNewCoord, newCoord = self._get_virtNewCoord()
         # pressure interpolation init
@@ -1359,7 +1352,7 @@ class SpatialInterpolatorRotation(SpatialInterpolator): # pragma: no cover
         # period for rotation
         period = 2 * np.pi
         # get angle
-        angle = self.angle_source.angle()
+        angle = self.angle_source.angle
         # counter to track angle position in time for each block
         count = 0
         for timeData in self.source.result(num):
@@ -1797,7 +1790,7 @@ class FiltOctave(Filter):
     """
 
     #: The center frequency of the octave or third-octave band. Default is ``1000``.
-    band = Float(1000.0, desc='band center frequency')
+    band = Float(1000.0)
 
     #: Defines whether the filter is an octave-band or third-octave-band filter.
     #:
@@ -1805,11 +1798,11 @@ class FiltOctave(Filter):
     #: - ``'Third octave'``: Third-octave band filter.
     #:
     #: Default is ``'Octave'``.
-    fraction = Map({'Octave': 1, 'Third octave': 3}, default_value='Octave', desc='fraction of octave')
+    fraction = Map({'Octave': 1, 'Third octave': 3}, default_value='Octave')
 
     #: The order of the IIR filter, which affects the steepness of the filter's roll-off.
     #: Default is ``3``.
-    order = Int(3, desc='IIR filter order')
+    order = Int(3)
 
     #: Second-order sections representation of the filter coefficients. This property depends on
     #: :attr:`band`, :attr:`fraction`, :attr:`order`, and the source's digest.
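
A minimal usage sketch for these traits; the data file is a placeholder and the trait values are chosen only for illustration:

```python
import acoular as ac

source = ac.TimeSamples(file='example_data.h5')  # placeholder file name

# third-octave band filter centered at 4 kHz with a 3rd-order IIR design
foct = ac.FiltOctave(source=source, band=4000.0, fraction='Third octave', order=3)

# pull the band-limited signal block by block
for block in foct.result(num=256):
    pass  # e.g. accumulate band energy here
```
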
@@ -1884,7 +1877,7 @@ class FiltFiltOctave(FiltOctave):
 
     #: The half-order of the IIR filter, applied twice (once forward and once backward). This
     #: results in a final filter order twice as large as the specified value. Default is ``2``.
-    order = Int(2, desc='IIR filter half order')
+    order = Int(2)
 
     #: A unique identifier for the filter, based on its properties. (read-only)
     digest = Property(depends_on=['source.digest', 'band', 'fraction', 'order'])
@@ -1999,7 +1992,7 @@ class TimeExpAverage(Filter):
     #: - ``'I'`` (Impulse) → 0.035 (non-standard)
     #:
     #: Default is ``'F'``.
-    weight = Map({'F': 0.125, 'S': 1.0, 'I': 0.035}, default_value='F', desc='time weighting')
+    weight = Map({'F': 0.125, 'S': 1.0, 'I': 0.035}, default_value='F')
 
     #: Filter coefficients in second-order section (SOS) format.
     sos = Property(depends_on=['weight', 'source.digest'])
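
For intuition about the ``weight`` map (0.125 s for ``'F'``, 1 s for ``'S'``, 0.035 s for ``'I'``): exponential time averaging is a first-order low-pass applied to the squared signal. A generic sketch of that idea, not the SOS coefficients this class actually builds:

```python
import numpy as np

def exp_average(x_squared, fs, tau=0.125):
    """First-order exponential averaging with time constant tau (seconds)."""
    alpha = 1.0 - np.exp(-1.0 / (tau * fs))  # per-sample smoothing coefficient
    y = np.empty_like(x_squared)
    acc = 0.0
    for i, v in enumerate(x_squared):
        acc += alpha * (v - acc)
        y[i] = acc
    return y

fs = 48000
x = np.random.randn(fs)                  # one second of noise
fast = exp_average(x**2, fs, tau=0.125)  # 'F' weighting
slow = exp_average(x**2, fs, tau=1.0)    # 'S' weighting
```
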
@@ -2072,7 +2065,7 @@ class FiltFreqWeight(Filter):
     #: - ``'Z'``: A flat response with no frequency weighting.
     #:
     #: Default is ``'A'``.
-    weight = Enum('A', 'C', 'Z', desc='frequency weighting')
+    weight = Enum('A', 'C', 'Z')
 
     #: Second-order sections (SOS) representation of the filter coefficients. This property is
     #: dynamically computed based on :attr:`weight` and the :attr:`Filter.source`'s digest.
@@ -2256,17 +2249,17 @@ class OctaveFilterBank(FilterBank):
 
     #: The lowest band center frequency index. Default is ``21``.
     #: This index refers to the position in the scale of octave or third-octave bands.
-    lband = Int(21, desc='lowest band center frequency index')
+    lband = Int(21)
 
     #: The highest band center frequency index + 1. Default is ``40``.
     #: This is the position in the scale of octave or third-octave bands.
-    hband = Int(40, desc='highest band center frequency index + 1')
+    hband = Int(40)
 
     #: The fraction of an octave, either ``'Octave'`` or ``'Third octave'``.
     #: Default is ``'Octave'``.
     #: Determines the width of the frequency bands. 'Octave' refers to full octaves,
     #: and ``'Third octave'`` refers to third-octave bands.
-    fraction = Map({'Octave': 1, 'Third octave': 3}, default_value='Octave', desc='fraction of octave')
+    fraction = Map({'Octave': 1, 'Third octave': 3}, default_value='Octave')
 
     #: The list of filter coefficients for all filters in the filter bank.
     #: The coefficients are computed based on the :attr:`lband`, :attr:`hband`,
@@ -2339,20 +2332,22 @@ class WriteWAV(TimeOut):
 
     #: The name of the file to be saved. If none is given, the name will be automatically
     #: generated from the source.
-    file = File(filter=['*.wav'], desc='name of wave file')
+    file = File(filter=['*.wav'])
 
     #: The name of the cache file (without extension). It serves as an internal reference for data
     #: caching and tracking processed files. (automatically generated)
     basename = Property(depends_on=['digest'])
 
     #: The list of channels to save. Can only contain one or two channels.
-    channels = List(int, desc='channels to save')
+    channels = List(int)
 
     # Bit depth of the output file.
-    encoding = Enum('uint8', 'int16', 'int32', desc='bit depth of the output file')
+    #: bit depth of the output file
+    encoding = Enum('uint8', 'int16', 'int32')
 
     # Maximum value to scale the output to. If `None`, the maximum value of the data is used.
-    max_val = Either(None, Float, desc='Maximum value to scale the output to.')
+    #: Maximum value to scale the output to.
+    max_val = Either(None, Float)
 
     #: A unique identifier for the filter, based on its properties. (read-only)
     digest = Property(depends_on=['source.digest', 'channels'])
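
A usage sketch for the WriteWAV traits shown above; the file names are illustrative, and the ``save()`` call assumes the class's convenience method for writing the whole source in one go:

```python
import acoular as ac

source = ac.TimeSamples(file='example_data.h5')  # placeholder file name

# write channels 0 and 1 as a 16-bit stereo WAV, scaled to a fixed maximum
wav = ac.WriteWAV(
    source=source,
    file='mix.wav',
    channels=[0, 1],
    encoding='int16',
    max_val=1.0,
)
wav.save()
```
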
@@ -2519,7 +2514,7 @@ class WriteH5(TimeOut):
 
     #: The name of the file to be saved. If none is given, the name is automatically
     #: generated based on the current timestamp.
-    file = File(filter=['*.h5'], desc='name of data file')
+    file = File(filter=['*.h5'])
 
     #: The number of samples to write to file per call to `result` method.
     #: Default is ``-1``, meaning all available data from the source will be written.
@@ -2533,10 +2528,10 @@ class WriteH5(TimeOut):
 
     #: Precision of the entries in the HDF5 file, represented as numpy data types.
     #: Default is ``'float32'``.
-    precision = Enum('float32', 'float64', desc='precision of H5 File')
+    precision = Enum('float32', 'float64')
 
     #: Metadata to be stored in the HDF5 file.
-    metadata = Dict(desc='metadata to be stored in .h5 file')
+    metadata = Dict()
 
     @cached_property
     def _get_digest(self):
@@ -2692,22 +2687,34 @@ class TimeConvolve(TimeOut):
 
     #: Convolution kernel in the time domain.
     #: The second dimension of the kernel array has to be either ``1`` or match
-    #: the :attr:`source`'s :attr:`~acoular.base.SamplesGenerator.num_channels` attribute.
+    #: the :attr:`source`'s :attr:`~acoular.base.Generator.num_channels` attribute.
     #: If only a single kernel is supplied, it is applied to all channels.
-    kernel = CArray(dtype=float, desc='Convolution kernel.')
+    kernel = CArray(dtype=float)
+
+    #: Controls whether to extend the output to include the full convolution result.
+    #:
+    #: - If ``False`` (default): Output length is :math:`\\max(L, M)`, where :math:`L` is the
+    #:   kernel length and :math:`M` is the signal length. This mode keeps the output length
+    #:   equal to the longest input (different from NumPy's ``mode='same'``, since it does not
+    #:   pad the output).
+    #: - If ``True``: Output length is :math:`L + M - 1`, returning the full convolution at
+    #:   each overlap point (similar to NumPy's ``mode='full'``).
+    #:
+    #: Default is ``False``.
+    extend_signal = Bool(False)
 
     # Internal block size for partitioning signals into smaller segments during processing.
-    _block_size = Int(desc='Block size')
+    #: Block size
+    _block_size = Int()
 
     # Blocks of the convolution kernel in the frequency domain.
     # Computed using Fast Fourier Transform (FFT).
     _kernel_blocks = Property(
         depends_on=['kernel', '_block_size'],
-        desc='Frequency domain Kernel blocks',
     )
 
     #: A unique identifier for the object, based on its properties. (read-only)
-    digest = Property(depends_on=['source.digest', 'kernel'])
+    digest = Property(depends_on=['source.digest', 'kernel', 'extend_signal'])
 
     @cached_property
     def _get_digest(self):
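
A sketch of what the new ``extend_signal`` flag means in practice; the source and kernel are placeholders:

```python
import numpy as np
import acoular as ac

source = ac.TimeSamples(file='example_data.h5')  # placeholder source with M samples
kernel = np.random.randn(1024, 1)                # kernel of length L = 1024, one column -> applied to all channels

tc_trunc = ac.TimeConvolve(source=source, kernel=kernel)                     # yields max(L, M) samples in total
tc_full = ac.TimeConvolve(source=source, kernel=kernel, extend_signal=True)  # yields L + M - 1 samples in total

n_out = sum(block.shape[0] for block in tc_full.result(num=128))
```
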
@@ -2765,12 +2772,12 @@ class TimeConvolve(TimeOut):
         return blocks
 
     def result(self, num=128):
-        """
+        r"""
         Convolve the source signal with the kernel and yield the result in blocks.
 
-        The method generates the convolution of the source signal with the kernel by processing the
-        signal in small blocks, performing the convolution in the frequency domain, and yielding the
-        results block by block.
+        The method generates the convolution of the source signal (length :math:`M`) with the kernel
+        (length :math:`L`) by processing the signal in small blocks, performing the convolution in
+        the frequency domain, and yielding the results block by block.
 
         Parameters
         ----------
@@ -2781,14 +2788,15 @@ class TimeConvolve(TimeOut):
         Yields
         ------
         :obj:`numpy.ndarray`
-            A array of shape (``num``, :attr:`~acoular.base.SamplesGenerator.num_channels`),
-            where :attr:`~acoular.base.SamplesGenerator.num_channels` is inhereted from the
+            An array of shape (``num``, :attr:`~acoular.base.Generator.num_channels`),
+            where :attr:`~acoular.base.Generator.num_channels` is inherited from the
            :attr:`source`, representing the convolution result in blocks.
 
         Notes
         -----
         - The kernel is first validated and reshaped if necessary.
         - The convolution is computed efficiently using the FFT in the frequency domain.
+        - The output length is determined by the :attr:`extend_signal` property.
         """
         self._validate_kernel()
         # initialize variables
@@ -2796,10 +2804,13 @@ class TimeConvolve(TimeOut):
         L = self.kernel.shape[0]
         N = self.source.num_channels
         M = self.source.num_samples
+
+        output_size = max(L, M) if not self.extend_signal else L + M - 1
+
         numblocks_kernel = int(np.ceil(L / num))  # number of kernel blocks
         Q = int(np.ceil(M / num))  # number of signal blocks
-        R = int(np.ceil((L + M - 1) / num))  # number of output blocks
-        last_size = (L + M - 1) % num  # size of final block
+        R = int(np.ceil(output_size / num))  # number of output blocks
+        last_size = output_size % num  # size of final output block
 
         idx = 0
         fdl = np.zeros([numblocks_kernel, num + 1, N], dtype='complex128')
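
A small worked example of the block bookkeeping above (numbers chosen for illustration):

```python
import numpy as np

L, M, num = 256, 1000, 128  # kernel length, signal length, block size

# extend_signal=False: output is max(L, M) = 1000 samples
output_size = max(L, M)
R = int(np.ceil(output_size / num))  # 8 output blocks
last_size = output_size % num        # 104 samples in the final block

# extend_signal=True: output is L + M - 1 = 1255 samples -> 10 blocks, final block of 103
```
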
@@ -2815,7 +2826,8 @@ class TimeConvolve(TimeOut):
             _append_to_fdl(fdl, idx, numblocks_kernel, rfft(buff, axis=0))
             spec_sum = _spectral_sum(spec_sum, fdl, self._kernel_blocks)
             # truncate s.t. total length is L+M-1 (like numpy convolve w/ mode="full")
-            yield irfft(spec_sum, axis=0)[num : last_size + num]
+            final_len = last_size if last_size != 0 else num
+            yield irfft(spec_sum, axis=0)[num : final_len + num]
             return
 
         # stream processing of source signal
@@ -2841,7 +2853,8 @@ class TimeConvolve(TimeOut):
         _append_to_fdl(fdl, idx, numblocks_kernel, rfft(buff, axis=0))
         spec_sum = _spectral_sum(spec_sum, fdl, self._kernel_blocks)
         # truncate s.t. total length is L+M-1 (like numpy convolve w/ mode="full")
-        yield irfft(spec_sum, axis=0)[num : last_size + num]
+        final_len = last_size if last_size != 0 else num
+        yield irfft(spec_sum, axis=0)[num : final_len + num]
 
 
 @nb.jit(nopython=True, cache=True)
acoular/trajectory.py CHANGED
@@ -81,7 +81,6 @@ class Trajectory(HasStrictTraits):
     points = Dict(
         key_trait=Float,
         value_trait=Tuple(Float, Float, Float),
-        desc='sampled positions along the trajectory',
     )
 
     #: Automatically determined tuple ``(t_min, t_max)`` representing the start and end times of the
@@ -194,4 +193,4 @@ class Trajectory(HasStrictTraits):
         t_end = self.interval[1]
         # all locations are fetched in one go because that is much faster further improvement could
         # be possible if interpolated locations are fetched in blocks
-        yield from zip(*self.location(np.arange(t_start, t_end, delta_t), der))
+        yield from zip(*self.location(np.arange(t_start, t_end, delta_t), der), strict=True)
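
For context, a minimal sketch of the Trajectory interface touched here; the sample points are made up, and enough of them are given for the spline interpolation:

```python
import numpy as np
import acoular as ac

# positions (x, y, z) sampled at a few instants; locations in between are interpolated
traj = ac.Trajectory(
    points={0.0: (0.0, 0.0, 0.0), 0.5: (0.2, 0.0, 0.0), 1.0: (0.5, 0.1, 0.0),
            1.5: (0.8, 0.1, 0.0), 2.0: (1.0, 0.2, 0.0)}
)

t = np.arange(0.0, 2.0, 0.1)
x, y, z = traj.location(t)            # interpolated positions
vx, vy, vz = traj.location(t, der=1)  # first derivative: velocities
```
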
acoular/version.py CHANGED
@@ -5,5 +5,5 @@
 """Dedicated file to determine the package version without importing acoular."""
 
 __author__ = 'Acoular Development Team'
-__date__ = '23 October 2025'
-__version__ = '25.10'
+__date__ = '28 January 2026'
+__version__ = '26.01'
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: acoular
-Version: 25.10
+Version: 26.1
 Summary: Python library for acoustic beamforming
 Project-URL: homepage, https://acoular.org
 Project-URL: documentation, https://acoular.org
@@ -51,65 +51,14 @@ Requires-Python: <3.14,>=3.10
 Requires-Dist: numba
 Requires-Dist: numpy
 Requires-Dist: scikit-learn
-Requires-Dist: scipy!=1.16.*,>=1.1.0
+Requires-Dist: scipy!=1.16.0,>=1.15; python_version == '3.10'
+Requires-Dist: scipy>=1.16.1; python_version > '3.10'
 Requires-Dist: tables
 Requires-Dist: traits>=6.0
-Provides-Extra: dev
-Requires-Dist: graphviz; extra == 'dev'
-Requires-Dist: h5py; extra == 'dev'
-Requires-Dist: hatch; extra == 'dev'
-Requires-Dist: ipython; extra == 'dev'
-Requires-Dist: matplotlib; extra == 'dev'
-Requires-Dist: numpydoc; extra == 'dev'
-Requires-Dist: pickleshare; extra == 'dev'
-Requires-Dist: pydata-sphinx-theme; extra == 'dev'
-Requires-Dist: pylops; extra == 'dev'
-Requires-Dist: pytest; extra == 'dev'
-Requires-Dist: pytest-cases; extra == 'dev'
-Requires-Dist: pytest-cov; extra == 'dev'
-Requires-Dist: pytest-env; extra == 'dev'
-Requires-Dist: pytest-mock; extra == 'dev'
-Requires-Dist: pytest-profiling; extra == 'dev'
-Requires-Dist: pytest-regtest; extra == 'dev'
-Requires-Dist: pyyaml; extra == 'dev'
-Requires-Dist: ruff==0.8.1; extra == 'dev'
-Requires-Dist: setuptools; extra == 'dev'
-Requires-Dist: sounddevice; extra == 'dev'
-Requires-Dist: sphinx; extra == 'dev'
-Requires-Dist: sphinx-copybutton; extra == 'dev'
-Requires-Dist: sphinx-gallery; extra == 'dev'
-Requires-Dist: sphinxcontrib-bibtex; extra == 'dev'
-Requires-Dist: traitsui; extra == 'dev'
-Provides-Extra: docs
-Requires-Dist: graphviz; extra == 'docs'
-Requires-Dist: ipython; extra == 'docs'
-Requires-Dist: matplotlib; extra == 'docs'
-Requires-Dist: numpydoc; extra == 'docs'
-Requires-Dist: pickleshare; extra == 'docs'
-Requires-Dist: pydata-sphinx-theme; extra == 'docs'
-Requires-Dist: setuptools; extra == 'docs'
-Requires-Dist: sounddevice; extra == 'docs'
-Requires-Dist: sphinx; extra == 'docs'
-Requires-Dist: sphinx-copybutton; extra == 'docs'
-Requires-Dist: sphinx-gallery; extra == 'docs'
-Requires-Dist: sphinxcontrib-bibtex; extra == 'docs'
 Provides-Extra: full
 Requires-Dist: matplotlib; extra == 'full'
 Requires-Dist: pylops; extra == 'full'
 Requires-Dist: sounddevice; extra == 'full'
-Provides-Extra: tests
-Requires-Dist: h5py; extra == 'tests'
-Requires-Dist: pylops; extra == 'tests'
-Requires-Dist: pytest; extra == 'tests'
-Requires-Dist: pytest-cases; extra == 'tests'
-Requires-Dist: pytest-cov; extra == 'tests'
-Requires-Dist: pytest-env; extra == 'tests'
-Requires-Dist: pytest-mock; extra == 'tests'
-Requires-Dist: pytest-profiling; extra == 'tests'
-Requires-Dist: pytest-regtest; extra == 'tests'
-Requires-Dist: pyyaml; extra == 'tests'
-Requires-Dist: sounddevice; extra == 'tests'
-Requires-Dist: traitsui; extra == 'tests'
 Description-Content-Type: text/markdown
 
 ![Acoular Logo](https://github.com/acoular/acoular/blob/master/docs/source/_static/Acoular_logo.png?raw=true)
@@ -117,45 +66,57 @@ Description-Content-Type: text/markdown
 [![PyPI](https://img.shields.io/pypi/pyversions/acoular.svg)](https://pypi.org/project/acoular)
 [![PyPI](https://img.shields.io/pypi/v/acoular.svg)](https://pypi.org/project/acoular)
 [![Actions status](https://github.com/acoular/acoular/actions/workflows/tests.yml/badge.svg)](https://github.com/acoular/acoular/actions)
-[![DOI](https://zenodo.org/badge/29729101.svg)](https://zenodo.org/doi/10.5281/zenodo.3690794)
+[![DOI](https://zenodo.org/badge/DOI/10.5281/3690794.svg)](https://zenodo.org/doi/10.5281/zenodo.3690794)
 
 # Acoular
-Acoular is a Python module for acoustic beamforming that is distributed under the new BSD license.
-
-It is aimed at applications in acoustic testing. Multichannel data recorded by a microphone array can be processed and analyzed in order to generate mappings of sound source distributions. The maps (acoustic photographs) can then be used to locate sources of interest and to characterize them using their spectra.
-
-# Features
-- frequency domain beamforming algorithms: delay & sum, Capon (adaptive), MUSIC, functional beamforming, eigenvalue beamforming
-- frequency domain deconvolution algorithms: DAMAS, DAMAS+, Clean, CleanSC, orthogonal deconvolution
-- frequency domain inverse methods: CMF (covariance matrix fitting), general inverse beamforming, SODIX
-- time domain methods: delay & sum beamforming, CleanT deconvolution
-- time domain methods applicable for moving sources with arbitrary trajectory (linear, circular, arbitrarily 3D curved),
-- frequency domain methods for rotating sources via virtual array rotation for arbitrary arrays and with different interpolation techniques
+Acoular is a Python module for acoustic beamforming that is distributed under the [BSD 3-clause license](LICENSE).
+
+It is aimed at (but not limited to) applications in acoustic testing. Multichannel data recorded by microphone arrays can be processed and analyzed to generate mappings of sound source distributions. The maps (acoustic photographs) can then be used to locate sources of interest and to characterize them using their spectra.
+
+👁️📢 Please consider taking the [**Acoular User Survey**](https://www.soscisurvey.de/acoularsurvey). It only takes 2 minutes.
+
+- **Website:** https://acoular.org
+- **Blog:** https://blog.acoular.org
+- **Installation:** https://acoular.org/install
+- **Getting Started** https://acoular.org/user_guide/get_started.html
+- **User Guide:** https://acoular.org/user_guide
+- **API Reference:** https://acoular.org/api_ref
+- **Examples:** https://acoular.org/auto_examples
+- **Contributing:** https://acoular.org/contributing
+- **Questions?:** https://github.com/orgs/acoular/discussions
+- **Bug Reports:** https://github.com/acoular/acoular/issues
+- **Report a Security Vulnerability:** https://github.com/acoular/acoular/security/advisories/new
+
+## Highlights
+- frequency domain methods:
+  - **beamforming:** delay & sum, Capon (adaptive), MUSIC, functional and eigenvalue beamforming
+  - **deconvolution:** DAMAS, DAMAS+, Clean, CleanSC, (gridless) orthogonal deconvolution
+  - **inverse methods:** CMF (covariance matrix fitting), general inverse beamforming, SODIX
+- time domain methods:
+  - **beamforming:** delay & sum
+  - **deconvolution:** CleanT
 - 1D, 2D and 3D mapping grids for all methods
-- gridless option for orthogonal deconvolution
-- four different built-in steering vector formulations
-- arbitrary stationary background flow can be considered for all methods
-- efficient cross spectral matrix computation
-- flexible modular time domain processing: n-th octave band filters, fast, slow, and impulse weighting, A-, C-, and Z-weighting, filter bank, zero delay filters
-- time domain simulation of array microphone signals from fixed and arbitrarily moving sources in arbitrary flow
-- fully object-oriented interface
-- lazy evaluation: while processing blocks are set up at any time, (expensive) computations are only performed when needed
-- intelligent and transparent caching: computed results are automatically saved and loaded on the next run to avoid unnecessary re-computation
-- parallel (multithreaded) implementation with Numba for most algorithms
-- easily extendable with new algorithms
-
-# License
-Acoular is licensed under the BSD 3-clause. See [LICENSE](LICENSE)
-
-# Citing
-
+- arbitrary stationary background 🌬️ **flow** can be considered for all methods
+- frequency domain methods for 🌀 **rotating sources** via virtual array rotation for arbitrary arrays
+- all time domain methods can identify 🚂🛩️ **moving sources** with arbitrary trajectory
+- flexible & modular 🧮 **signal processing**:
+  - n-th octave band filters
+  - fast, slow, and impulse weighting
+  - A-, C-, and Z-weighting
+  - filter bank
+  - linear phase filters
+- intelligent and transparent :floppy_disk: **caching**: computed results are automatically saved and loaded on the next run to avoid unnecessary re-computation.
+- 🦥 **lazy** evaluation: while processing blocks are set up at any time, (expensive) computations are only performed when needed.
+- 🏎️ **efficient & parallel** (multithreaded) computation with [Numba](https://numba.pydata.org) for most algorithms.
+
+## Citing
 If you use Acoular for academic work, please consider citing both our
 [publication](https://doi.org/10.1016/j.apacoust.2016.09.015):
 
 Sarradj, E., & Herold, G. (2017).
 A Python framework for microphone array data processing.
 Applied Acoustics, 116, 50–58.
-https://doi.org/10.1016/j.apacoust.2016.09
+https://doi.org/10.1016/j.apacoust.2016.09.015
 
 and our [software](https://zenodo.org/doi/10.5281/zenodo.3690794):
 
@@ -163,41 +124,26 @@ and our [software](https://zenodo.org/doi/10.5281/zenodo.3690794):
 Acoular – Acoustic testing and source mapping software.
 Zenodo. https://zenodo.org/doi/10.5281/zenodo.3690794
 
-# Dependencies
-Acoular runs under Linux, Windows and MacOS and needs Numpy, Scipy, Traits, scikit-learn, pytables, Numba packages available.
-Matplotlib is needed for some of the examples.
-
-If you want to use input from a soundcard, you will also need to install the [sounddevice](https://python-sounddevice.readthedocs.io/en/0.3.12/installation.html) package. Some solvers for the CMF method need [Pylops](https://pylops.readthedocs.io/en/stable/installation.html).
+## Installation
 
-# Installation
+Acoular can be installed from [PyPI](https://pypi.org/project/acoular). It is recommended to use a [virtual environment](https://docs.python.org/3/library/venv.html). Inside the environment, run
 
-Acoular can be installed via [conda](https://docs.conda.io/en/latest/), which is also part of the [Anaconda Python distribution](https://www.anaconda.com/). It is recommended to install into a dedicated [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). After activating this environment, run
+    pip install acoular
+
+A second option is to install Acoular with [conda](https://docs.conda.io/en/latest/). It is recommended to install into a dedicated [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). After activating the environment, run
 
     conda install -c acoular acoular
 
-This will install Acoular in your Anaconda Python environment and make the Acoular library available from Python. In addition, this will install all dependencies (those other packages mentioned above) if they are not already present on your system.
-
-A second option is to install Acoular via [pip](https://pip.pypa.io/en/stable/). It is recommended to use a dedicated [virtual environment](https://virtualenv.pypa.io/en/latest/) and then run
-
-    pip install acoular
-
 For more detailed installation instructions, see the [documentation](https://acoular.org/install/index.html).
 
-# Documentation and help
+## Documentation and help
 Documentation is available [here](https://acoular.org) with a
-[getting started](https://acoular.org/get_started/index.html) section and
+[getting started](https://www.acoular.org/user_guide/get_started.html) section and
 [examples](https://acoular.org/auto_examples/index.html).
 
-The Acoular [blog](https://acoular.github.io/blog/) contains some tutorials.
-
 If you discover problems with the Acoular software, please report them using the [issue tracker](https://github.com/acoular/acoular/issues) on GitHub. Please use the [Acoular discussions forum](https://github.com/acoular/acoular/discussions) for practical questions, discussions, and demos.
 
-# Contributing
-
-We are always happy to welcome new contributors to the project.
-If you are interested in contributing, have a look at the [CONTRIBUTING.md](CONTRIBUTING.md) file.
-
-# Example
+## Example
 This reads data from 64 microphone channels and computes a beamforming map for the 8kHz third octave band:
 
 ```python
@@ -244,4 +190,3 @@ plt.show()
 
 ![result](https://github.com/acoular/acoular/blob/master/docs/source/user_guide/three_source_py3_colormap.png?raw=true)
 
-