torchaudio-2.6.0-cp313-cp313-manylinux_2_28_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of torchaudio might be problematic.

Files changed (146)
  1. torchaudio/__init__.py +53 -0
  2. torchaudio/_backend/__init__.py +61 -0
  3. torchaudio/_backend/backend.py +53 -0
  4. torchaudio/_backend/common.py +52 -0
  5. torchaudio/_backend/ffmpeg.py +334 -0
  6. torchaudio/_backend/soundfile.py +54 -0
  7. torchaudio/_backend/soundfile_backend.py +457 -0
  8. torchaudio/_backend/sox.py +91 -0
  9. torchaudio/_backend/utils.py +317 -0
  10. torchaudio/_extension/__init__.py +74 -0
  11. torchaudio/_extension/utils.py +180 -0
  12. torchaudio/_internal/__init__.py +10 -0
  13. torchaudio/_internal/module_utils.py +113 -0
  14. torchaudio/backend/__init__.py +8 -0
  15. torchaudio/backend/_no_backend.py +25 -0
  16. torchaudio/backend/_sox_io_backend.py +294 -0
  17. torchaudio/backend/common.py +13 -0
  18. torchaudio/backend/no_backend.py +14 -0
  19. torchaudio/backend/soundfile_backend.py +14 -0
  20. torchaudio/backend/sox_io_backend.py +14 -0
  21. torchaudio/compliance/__init__.py +5 -0
  22. torchaudio/compliance/kaldi.py +813 -0
  23. torchaudio/datasets/__init__.py +47 -0
  24. torchaudio/datasets/cmuarctic.py +157 -0
  25. torchaudio/datasets/cmudict.py +186 -0
  26. torchaudio/datasets/commonvoice.py +86 -0
  27. torchaudio/datasets/dr_vctk.py +121 -0
  28. torchaudio/datasets/fluentcommands.py +108 -0
  29. torchaudio/datasets/gtzan.py +1118 -0
  30. torchaudio/datasets/iemocap.py +147 -0
  31. torchaudio/datasets/librilight_limited.py +111 -0
  32. torchaudio/datasets/librimix.py +133 -0
  33. torchaudio/datasets/librispeech.py +174 -0
  34. torchaudio/datasets/librispeech_biasing.py +189 -0
  35. torchaudio/datasets/libritts.py +168 -0
  36. torchaudio/datasets/ljspeech.py +107 -0
  37. torchaudio/datasets/musdb_hq.py +139 -0
  38. torchaudio/datasets/quesst14.py +136 -0
  39. torchaudio/datasets/snips.py +157 -0
  40. torchaudio/datasets/speechcommands.py +183 -0
  41. torchaudio/datasets/tedlium.py +218 -0
  42. torchaudio/datasets/utils.py +54 -0
  43. torchaudio/datasets/vctk.py +143 -0
  44. torchaudio/datasets/voxceleb1.py +309 -0
  45. torchaudio/datasets/yesno.py +89 -0
  46. torchaudio/functional/__init__.py +127 -0
  47. torchaudio/functional/_alignment.py +128 -0
  48. torchaudio/functional/filtering.py +1670 -0
  49. torchaudio/functional/functional.py +2535 -0
  50. torchaudio/io/__init__.py +13 -0
  51. torchaudio/io/_effector.py +347 -0
  52. torchaudio/io/_playback.py +72 -0
  53. torchaudio/kaldi_io.py +144 -0
  54. torchaudio/lib/__init__.py +0 -0
  55. torchaudio/lib/_torchaudio.so +0 -0
  56. torchaudio/lib/_torchaudio_sox.so +0 -0
  57. torchaudio/lib/libtorchaudio.so +0 -0
  58. torchaudio/lib/libtorchaudio_sox.so +0 -0
  59. torchaudio/models/__init__.py +85 -0
  60. torchaudio/models/_hdemucs.py +1008 -0
  61. torchaudio/models/conformer.py +293 -0
  62. torchaudio/models/conv_tasnet.py +330 -0
  63. torchaudio/models/decoder/__init__.py +46 -0
  64. torchaudio/models/decoder/_ctc_decoder.py +568 -0
  65. torchaudio/models/decoder/_cuda_ctc_decoder.py +187 -0
  66. torchaudio/models/deepspeech.py +84 -0
  67. torchaudio/models/emformer.py +884 -0
  68. torchaudio/models/rnnt.py +816 -0
  69. torchaudio/models/rnnt_decoder.py +339 -0
  70. torchaudio/models/squim/__init__.py +11 -0
  71. torchaudio/models/squim/objective.py +326 -0
  72. torchaudio/models/squim/subjective.py +150 -0
  73. torchaudio/models/tacotron2.py +1046 -0
  74. torchaudio/models/wav2letter.py +72 -0
  75. torchaudio/models/wav2vec2/__init__.py +45 -0
  76. torchaudio/models/wav2vec2/components.py +1167 -0
  77. torchaudio/models/wav2vec2/model.py +1579 -0
  78. torchaudio/models/wav2vec2/utils/__init__.py +7 -0
  79. torchaudio/models/wav2vec2/utils/import_fairseq.py +213 -0
  80. torchaudio/models/wav2vec2/utils/import_huggingface.py +134 -0
  81. torchaudio/models/wav2vec2/wavlm_attention.py +214 -0
  82. torchaudio/models/wavernn.py +409 -0
  83. torchaudio/pipelines/__init__.py +102 -0
  84. torchaudio/pipelines/_source_separation_pipeline.py +109 -0
  85. torchaudio/pipelines/_squim_pipeline.py +156 -0
  86. torchaudio/pipelines/_tts/__init__.py +16 -0
  87. torchaudio/pipelines/_tts/impl.py +385 -0
  88. torchaudio/pipelines/_tts/interface.py +255 -0
  89. torchaudio/pipelines/_tts/utils.py +228 -0
  90. torchaudio/pipelines/_wav2vec2/__init__.py +0 -0
  91. torchaudio/pipelines/_wav2vec2/aligner.py +87 -0
  92. torchaudio/pipelines/_wav2vec2/impl.py +1699 -0
  93. torchaudio/pipelines/_wav2vec2/utils.py +346 -0
  94. torchaudio/pipelines/rnnt_pipeline.py +380 -0
  95. torchaudio/prototype/__init__.py +0 -0
  96. torchaudio/prototype/datasets/__init__.py +4 -0
  97. torchaudio/prototype/datasets/musan.py +67 -0
  98. torchaudio/prototype/functional/__init__.py +26 -0
  99. torchaudio/prototype/functional/_dsp.py +433 -0
  100. torchaudio/prototype/functional/_rir.py +379 -0
  101. torchaudio/prototype/functional/functional.py +190 -0
  102. torchaudio/prototype/models/__init__.py +36 -0
  103. torchaudio/prototype/models/_conformer_wav2vec2.py +794 -0
  104. torchaudio/prototype/models/_emformer_hubert.py +333 -0
  105. torchaudio/prototype/models/conv_emformer.py +525 -0
  106. torchaudio/prototype/models/hifi_gan.py +336 -0
  107. torchaudio/prototype/models/rnnt.py +711 -0
  108. torchaudio/prototype/models/rnnt_decoder.py +399 -0
  109. torchaudio/prototype/pipelines/__init__.py +12 -0
  110. torchaudio/prototype/pipelines/_vggish/__init__.py +3 -0
  111. torchaudio/prototype/pipelines/_vggish/_vggish_impl.py +233 -0
  112. torchaudio/prototype/pipelines/_vggish/_vggish_pipeline.py +82 -0
  113. torchaudio/prototype/pipelines/hifigan_pipeline.py +228 -0
  114. torchaudio/prototype/pipelines/rnnt_pipeline.py +58 -0
  115. torchaudio/prototype/transforms/__init__.py +9 -0
  116. torchaudio/prototype/transforms/_transforms.py +456 -0
  117. torchaudio/sox_effects/__init__.py +10 -0
  118. torchaudio/sox_effects/sox_effects.py +272 -0
  119. torchaudio/transforms/__init__.py +75 -0
  120. torchaudio/transforms/_multi_channel.py +467 -0
  121. torchaudio/transforms/_transforms.py +2137 -0
  122. torchaudio/utils/__init__.py +11 -0
  123. torchaudio/utils/download.py +89 -0
  124. torchaudio/utils/ffmpeg_utils.py +11 -0
  125. torchaudio/utils/sox_utils.py +99 -0
  126. torchaudio/version.py +2 -0
  127. torchaudio-2.6.0.dist-info/LICENSE +25 -0
  128. torchaudio-2.6.0.dist-info/METADATA +114 -0
  129. torchaudio-2.6.0.dist-info/RECORD +146 -0
  130. torchaudio-2.6.0.dist-info/WHEEL +5 -0
  131. torchaudio-2.6.0.dist-info/top_level.txt +2 -0
  132. torio/__init__.py +8 -0
  133. torio/_extension/__init__.py +13 -0
  134. torio/_extension/utils.py +147 -0
  135. torio/io/__init__.py +9 -0
  136. torio/io/_streaming_media_decoder.py +978 -0
  137. torio/io/_streaming_media_encoder.py +502 -0
  138. torio/lib/__init__.py +0 -0
  139. torio/lib/_torio_ffmpeg4.so +0 -0
  140. torio/lib/_torio_ffmpeg5.so +0 -0
  141. torio/lib/_torio_ffmpeg6.so +0 -0
  142. torio/lib/libtorio_ffmpeg4.so +0 -0
  143. torio/lib/libtorio_ffmpeg5.so +0 -0
  144. torio/lib/libtorio_ffmpeg6.so +0 -0
  145. torio/utils/__init__.py +4 -0
  146. torio/utils/ffmpeg_utils.py +247 -0
torchaudio/functional/filtering.py
@@ -0,0 +1,1670 @@
+ import math
+ import warnings
+ from typing import Optional
+
+ import torch
+ from torch import Tensor
+
+ from torchaudio._extension import _IS_TORCHAUDIO_EXT_AVAILABLE
+
+
+ def _dB2Linear(x: float) -> float:
+     return math.exp(x * math.log(10) / 20.0)
+
+
+ def _generate_wave_table(
+     wave_type: str,
+     data_type: str,
+     table_size: int,
+     min: float,
+     max: float,
+     phase: float,
+     device: torch.device,
+ ) -> Tensor:
+     r"""A helper function for phaser. Generates a table with given parameters.
+
+     Args:
+         wave_type (str): ``SINE`` or ``TRIANGLE``
+         data_type (str): desired data type (``INT`` or ``FLOAT``)
+         table_size (int): desired table size
+         min (float): desired min value
+         max (float): desired max value
+         phase (float): desired phase
+         device (torch.device): Torch device on which the table must be generated
+     Returns:
+         Tensor: A 1D tensor with wave table values
+     """
+
+     phase_offset = int(phase / math.pi / 2 * table_size + 0.5)
+
+     t = torch.arange(table_size, device=device, dtype=torch.int32)
+
+     point = (t + phase_offset) % table_size
+
+     d = torch.zeros_like(point, device=device, dtype=torch.float64)
+
+     if wave_type == "SINE":
+         d = (torch.sin(point.to(torch.float64) / table_size * 2 * math.pi) + 1) / 2
+     elif wave_type == "TRIANGLE":
+         d = point.to(torch.float64) * 2 / table_size
+         value = torch.div(4 * point, table_size, rounding_mode="floor")
+         d[value == 0] = d[value == 0] + 0.5
+         d[value == 1] = 1.5 - d[value == 1]
+         d[value == 2] = 1.5 - d[value == 2]
+         d[value == 3] = d[value == 3] - 1.5
+
+     d = d * (max - min) + min
+
+     if data_type == "INT":
+         mask = d < 0
+         d[mask] = d[mask] - 0.5
+         d[~mask] = d[~mask] + 0.5
+         d = d.to(torch.int32)
+     elif data_type == "FLOAT":
+         d = d.to(torch.float32)
+
+     return d
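For illustration, a minimal sketch of calling this helper directly (it is a private function, so the import path is an assumption that may change between releases):

    import torch
    from torchaudio.functional.filtering import _generate_wave_table

    # Build an 8-point sine LFO table spanning [-1.0, 1.0] with zero phase.
    table = _generate_wave_table(
        wave_type="SINE", data_type="FLOAT", table_size=8,
        min=-1.0, max=1.0, phase=0.0, device=torch.device("cpu"),
    )
    print(table)  # 8 float32 values tracing one sine period, starting at 0.0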
+
+
+ def allpass_biquad(waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707) -> Tensor:
+     r"""Design two-pole all-pass filter. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         central_freq (float or torch.Tensor): central frequency (in Hz)
+         Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+         - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
+     """
+     dtype = waveform.dtype
+     device = waveform.device
+     central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
+     Q = torch.as_tensor(Q, dtype=dtype, device=device)
+
+     w0 = 2 * math.pi * central_freq / sample_rate
+
+     alpha = torch.sin(w0) / 2 / Q
+
+     b0 = 1 - alpha
+     b1 = -2 * torch.cos(w0)
+     b2 = 1 + alpha
+     a0 = 1 + alpha
+     a1 = -2 * torch.cos(w0)
+     a2 = 1 - alpha
+     return biquad(waveform, b0, b1, b2, a0, a1, a2)
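A minimal usage sketch (not part of the diff; assumes torchaudio 2.6.0 is installed). An all-pass stage changes only the phase response, so input and output energies should be close:

    import torch
    import torchaudio.functional as F

    sample_rate = 44100
    noise = torch.rand(1, sample_rate) * 2 - 1   # one second of noise in [-1, 1]
    out = F.allpass_biquad(noise, sample_rate, central_freq=1000.0)
    # Energies roughly match, up to the [-1, 1] clamping inside biquad/lfilter.
    print(noise.pow(2).mean(), out.pow(2).mean())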
+
+
+ def band_biquad(
+     waveform: Tensor,
+     sample_rate: int,
+     central_freq: float,
+     Q: float = 0.707,
+     noise: bool = False,
+ ) -> Tensor:
+     r"""Design two-pole band filter. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         central_freq (float or torch.Tensor): central frequency (in Hz)
+         Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``).
+         noise (bool, optional): If ``True``, uses the alternate mode for un-pitched audio (e.g. percussion).
+             If ``False``, uses mode oriented to pitched audio, i.e. voice, singing,
+             or instrumental music (Default: ``False``).
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+         - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
+     """
+     dtype = waveform.dtype
+     device = waveform.device
+     central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
+     Q = torch.as_tensor(Q, dtype=dtype, device=device)
+
+     w0 = 2 * math.pi * central_freq / sample_rate
+     bw_Hz = central_freq / Q
+
+     a0 = 1.0
+     a2 = torch.exp(-2 * math.pi * bw_Hz / sample_rate)
+     a1 = -4 * a2 / (1 + a2) * torch.cos(w0)
+
+     b0 = torch.sqrt(1 - a1 * a1 / (4 * a2)) * (1 - a2)
+
+     if noise:
+         mult = torch.sqrt(((1 + a2) * (1 + a2) - a1 * a1) * (1 - a2) / (1 + a2)) / b0
+         b0 = mult * b0
+
+     b1 = 0.0
+     b2 = 0.0
+
+     return biquad(waveform, b0, b1, b2, a0, a1, a2)
+
+
+ def bandpass_biquad(
+     waveform: Tensor,
+     sample_rate: int,
+     central_freq: float,
+     Q: float = 0.707,
+     const_skirt_gain: bool = False,
+ ) -> Tensor:
+     r"""Design two-pole band-pass filter. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         central_freq (float or torch.Tensor): central frequency (in Hz)
+         Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
+         const_skirt_gain (bool, optional): If ``True``, uses a constant skirt gain (peak gain = Q).
+             If ``False``, uses a constant 0dB peak gain. (Default: ``False``)
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+         - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
+     """
+     dtype = waveform.dtype
+     device = waveform.device
+     central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
+     Q = torch.as_tensor(Q, dtype=dtype, device=device)
+
+     w0 = 2 * math.pi * central_freq / sample_rate
+     alpha = torch.sin(w0) / 2 / Q
+
+     temp = torch.sin(w0) / 2 if const_skirt_gain else alpha
+     b0 = temp
+     b1 = 0.0
+     b2 = -temp
+     a0 = 1 + alpha
+     a1 = -2 * torch.cos(w0)
+     a2 = 1 - alpha
+     return biquad(waveform, b0, b1, b2, a0, a1, a2)
+
+
+ def bandreject_biquad(waveform: Tensor, sample_rate: int, central_freq: float, Q: float = 0.707) -> Tensor:
+     r"""Design two-pole band-reject filter. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         central_freq (float or torch.Tensor): central frequency (in Hz)
+         Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+         - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
+     """
+     dtype = waveform.dtype
+     device = waveform.device
+     central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
+     Q = torch.as_tensor(Q, dtype=dtype, device=device)
+
+     w0 = 2 * math.pi * central_freq / sample_rate
+     alpha = torch.sin(w0) / 2 / Q
+
+     b0 = 1.0
+     b1 = -2 * torch.cos(w0)
+     b2 = 1.0
+     a0 = 1 + alpha
+     a1 = -2 * torch.cos(w0)
+     a2 = 1 - alpha
+     return biquad(waveform, b0, b1, b2, a0, a1, a2)
+
+
+ def bass_biquad(
+     waveform: Tensor,
+     sample_rate: int,
+     gain: float,
+     central_freq: float = 100,
+     Q: float = 0.707,
+ ) -> Tensor:
+     r"""Design a bass tone-control effect. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB.
+         central_freq (float or torch.Tensor, optional): central frequency (in Hz). (Default: ``100``)
+         Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``).
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+         - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
+     """
+     dtype = waveform.dtype
+     device = waveform.device
+     central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
+     Q = torch.as_tensor(Q, dtype=dtype, device=device)
+     gain = torch.as_tensor(gain, dtype=dtype, device=device)
+
+     w0 = 2 * math.pi * central_freq / sample_rate
+     alpha = torch.sin(w0) / 2 / Q
+     A = torch.exp(gain / 40 * math.log(10))
+
+     temp1 = 2 * torch.sqrt(A) * alpha
+     temp2 = (A - 1) * torch.cos(w0)
+     temp3 = (A + 1) * torch.cos(w0)
+
+     b0 = A * ((A + 1) - temp2 + temp1)
+     b1 = 2 * A * ((A - 1) - temp3)
+     b2 = A * ((A + 1) - temp2 - temp1)
+     a0 = (A + 1) + temp2 + temp1
+     a1 = -2 * ((A - 1) + temp3)
+     a2 = (A + 1) + temp2 - temp1
+
+     return biquad(waveform, b0 / a0, b1 / a0, b2 / a0, a0 / a0, a1 / a0, a2 / a0)
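A minimal tone-control sketch (not part of the diff): bass_biquad pairs naturally with treble_biquad, defined later in this file.

    import torch
    import torchaudio.functional as F

    sample_rate = 44100
    x = torch.rand(1, sample_rate) * 2 - 1
    # Boost lows by 6 dB around the default 100 Hz shelf,
    # then cut highs by 3 dB around the default 3 kHz shelf.
    y = F.bass_biquad(x, sample_rate, gain=6.0)
    y = F.treble_biquad(y, sample_rate, gain=-3.0)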
+
+
+ def biquad(waveform: Tensor, b0: float, b1: float, b2: float, a0: float, a1: float, a2: float) -> Tensor:
+     r"""Perform a biquad filter of input tensor. Initial conditions set to 0.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         b0 (float or torch.Tensor): numerator coefficient of current input, x[n]
+         b1 (float or torch.Tensor): numerator coefficient of input one time step ago, x[n-1]
+         b2 (float or torch.Tensor): numerator coefficient of input two time steps ago, x[n-2]
+         a0 (float or torch.Tensor): denominator coefficient of current output y[n], typically 1
+         a1 (float or torch.Tensor): denominator coefficient of output one time step ago, y[n-1]
+         a2 (float or torch.Tensor): denominator coefficient of output two time steps ago, y[n-2]
+
+     Returns:
+         Tensor: Waveform with dimension of `(..., time)`
+
+     Reference:
+         - https://en.wikipedia.org/wiki/Digital_biquad_filter
+     """
+
+     device = waveform.device
+     dtype = waveform.dtype
+
+     b0 = torch.as_tensor(b0, dtype=dtype, device=device).view(1)
+     b1 = torch.as_tensor(b1, dtype=dtype, device=device).view(1)
+     b2 = torch.as_tensor(b2, dtype=dtype, device=device).view(1)
+     a0 = torch.as_tensor(a0, dtype=dtype, device=device).view(1)
+     a1 = torch.as_tensor(a1, dtype=dtype, device=device).view(1)
+     a2 = torch.as_tensor(a2, dtype=dtype, device=device).view(1)
+
+     output_waveform = lfilter(
+         waveform,
+         torch.cat([a0, a1, a2]),
+         torch.cat([b0, b1, b2]),
+     )
+     return output_waveform
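As the body shows, biquad is a thin wrapper around lfilter; a quick sketch confirming the equivalence:

    import torch
    import torchaudio.functional as F

    x = torch.rand(1, 8000) * 2 - 1
    b0, b1, b2, a0, a1, a2 = 0.2, 0.0, -0.2, 1.0, -0.5, 0.1
    y1 = F.biquad(x, b0, b1, b2, a0, a1, a2)
    y2 = F.lfilter(x, torch.tensor([a0, a1, a2]), torch.tensor([b0, b1, b2]))
    print(torch.allclose(y1, y2))  # True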
+
+
+ def contrast(waveform: Tensor, enhancement_amount: float = 75.0) -> Tensor:
+     r"""Apply contrast effect. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Comparable with compression, this effect modifies an audio signal to make it sound louder.
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         enhancement_amount (float, optional): controls the amount of the enhancement.
+             Allowed range of values for enhancement_amount: 0-100.
+             Note that enhancement_amount = 0 still gives a significant contrast enhancement.
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+     """
+
+     if not 0 <= enhancement_amount <= 100:
+         raise ValueError("Allowed range of values for enhancement_amount : 0-100")
+
+     contrast = enhancement_amount / 750.0
+
+     temp1 = waveform * (math.pi / 2)
+     temp2 = contrast * torch.sin(temp1 * 4)
+     output_waveform = torch.sin(temp1 + temp2)
+
+     return output_waveform
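A minimal usage sketch (not part of the diff): the sine waveshaping above adds harmonics, which raises perceived loudness without raising peak level.

    import math
    import torch
    import torchaudio.functional as F

    t = torch.arange(8000) / 8000
    x = 0.5 * torch.sin(2 * math.pi * 440 * t).unsqueeze(0)
    louder = F.contrast(x, enhancement_amount=75.0)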
+
+
+ def dcshift(waveform: Tensor, shift: float, limiter_gain: Optional[float] = None) -> Tensor:
+     r"""Apply a DC shift to the audio. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: TorchScript
+
+     This can be useful to remove a DC offset
+     (caused perhaps by a hardware problem in the recording chain) from the audio.
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         shift (float): indicates the amount to shift the audio.
+             Allowed range of values for shift: -2.0 to +2.0
+         limiter_gain (float or None, optional): It is used only on peaks to prevent clipping.
+             It should have a value much less than 1 (e.g. 0.05 or 0.02)
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+     """
+     output_waveform = waveform
+     limiter_threshold = 0.0
+
+     if limiter_gain is not None:
+         limiter_threshold = 1.0 - (abs(shift) - limiter_gain)
+
+     # Note:
+     # the following index-based update breaks auto-grad support
+     if limiter_gain is not None and shift > 0:
+         mask = waveform > limiter_threshold
+         temp = (waveform[mask] - limiter_threshold) * limiter_gain / (1 - limiter_threshold)
+         output_waveform[mask] = (temp + limiter_threshold + shift).clamp(max=limiter_threshold)
+         output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1)
+     elif limiter_gain is not None and shift < 0:
+         mask = waveform < -limiter_threshold
+         temp = (waveform[mask] + limiter_threshold) * limiter_gain / (1 - limiter_threshold)
+         output_waveform[mask] = (temp - limiter_threshold + shift).clamp(min=-limiter_threshold)
+         output_waveform[~mask] = (waveform[~mask] + shift).clamp(min=-1, max=1)
+     else:
+         output_waveform = (waveform + shift).clamp(min=-1, max=1)
+
+     return output_waveform
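A minimal usage sketch (not part of the diff): with no limiter_gain the function simply adds the shift and clamps to [-1, 1].

    import torch
    import torchaudio.functional as F

    x = torch.rand(1, 8000) * 0.5 - 0.35       # waveform with a negative DC offset
    y = F.dcshift(x, shift=0.1)                # shift the signal up by 0.1
    print(x.mean().item(), y.mean().item())    # mean moves up by ~0.1 (no clamping here)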
+
+
+ def deemph_biquad(waveform: Tensor, sample_rate: int) -> Tensor:
+     r"""Apply ISO 908 CD de-emphasis (shelving) IIR filter. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform. Allowed sample rates are ``44100`` and ``48000``
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+         - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
+     """
+
+     if sample_rate == 44100:
+         central_freq = 5283
+         width_slope = 0.4845
+         gain = -9.477
+     elif sample_rate == 48000:
+         central_freq = 5356
+         width_slope = 0.479
+         gain = -9.62
+     else:
+         raise ValueError("Sample rate must be 44100 (audio-CD) or 48000 (DAT)")
+
+     w0 = 2 * math.pi * central_freq / sample_rate
+     A = math.exp(gain / 40.0 * math.log(10))
+     alpha = math.sin(w0) / 2 * math.sqrt((A + 1 / A) * (1 / width_slope - 1) + 2)
+
+     temp1 = 2 * math.sqrt(A) * alpha
+     temp2 = (A - 1) * math.cos(w0)
+     temp3 = (A + 1) * math.cos(w0)
+
+     b0 = A * ((A + 1) + temp2 + temp1)
+     b1 = -2 * A * ((A - 1) + temp3)
+     b2 = A * ((A + 1) + temp2 - temp1)
+     a0 = (A + 1) - temp2 + temp1
+     a1 = 2 * ((A - 1) - temp3)
+     a2 = (A + 1) - temp2 - temp1
+
+     return biquad(waveform, b0, b1, b2, a0, a1, a2)
+
+
+ def _add_noise_shaping(dithered_waveform: Tensor, waveform: Tensor) -> Tensor:
+     r"""Noise shaping is calculated by error:
+     error[n] = dithered[n] - original[n]
+     noise_shaped_waveform[n] = dithered[n] + error[n-1]
+     """
+     wf_shape = waveform.size()
+     waveform = waveform.reshape(-1, wf_shape[-1])
+
+     dithered_shape = dithered_waveform.size()
+     dithered_waveform = dithered_waveform.reshape(-1, dithered_shape[-1])
+
+     error = dithered_waveform - waveform
+
+     # add error[n-1] to dithered_waveform[n], so offset the error by 1 index
+     zeros = torch.zeros(1, dtype=error.dtype, device=error.device)
+     for index in range(error.size()[0]):
+         err = error[index]
+         error_offset = torch.cat((zeros, err))
+         error[index] = error_offset[: waveform.size()[1]]
+
+     noise_shaped = dithered_waveform + error
+     return noise_shaped.reshape(dithered_shape[:-1] + noise_shaped.shape[-1:])
+
+
+ def _apply_probability_distribution(waveform: Tensor, density_function: str = "TPDF") -> Tensor:
+     r"""Apply a probability distribution function on a waveform.
+
+     Triangular probability density function (TPDF) dither noise has a
+     triangular distribution; values in the center of the range have a higher
+     probability of occurring.
+
+     Rectangular probability density function (RPDF) dither noise has a
+     uniform distribution; any value in the specified range has the same
+     probability of occurring.
+
+     Gaussian probability density function (GPDF) has a normal distribution.
+     The relationship of probabilities of results follows a bell-shaped,
+     or Gaussian curve, typical of dither generated by analog sources.
+     Args:
+         waveform (Tensor): Tensor of audio of dimension (..., time)
+         density_function (str, optional): The density function of a
+             continuous random variable (Default: ``"TPDF"``)
+             Options: Triangular Probability Density Function - `TPDF`
+                      Rectangular Probability Density Function - `RPDF`
+                      Gaussian Probability Density Function - `GPDF`
+     Returns:
+         Tensor: waveform dithered with TPDF
+     """
+
+     # pack batch
+     shape = waveform.size()
+     waveform = waveform.reshape(-1, shape[-1])
+
+     channel_size = waveform.size()[0] - 1
+     time_size = waveform.size()[-1] - 1
+
+     random_channel = int(torch.randint(channel_size, [1]).item()) if channel_size > 0 else 0
+     random_time = int(torch.randint(time_size, [1]).item()) if time_size > 0 else 0
+
+     number_of_bits = 16
+     up_scaling = 2 ** (number_of_bits - 1) - 2
+     signal_scaled = waveform * up_scaling
+     down_scaling = 2 ** (number_of_bits - 1)
+
+     signal_scaled_dis = waveform
+     if density_function == "RPDF":
+         RPDF = waveform[random_channel][random_time] - 0.5
+
+         signal_scaled_dis = signal_scaled + RPDF
+     elif density_function == "GPDF":
+         # TODO Replace by distribution code once
+         # https://github.com/pytorch/pytorch/issues/29843 is resolved
+         # gaussian = torch.distributions.normal.Normal(torch.mean(waveform, -1), 1).sample()
+
+         num_rand_variables = 6
+
+         gaussian = waveform[random_channel][random_time]
+         for ws in num_rand_variables * [time_size]:
+             rand_chan = int(torch.randint(channel_size, [1]).item())
+             gaussian += waveform[rand_chan][int(torch.randint(ws, [1]).item())]
+
+         signal_scaled_dis = signal_scaled + gaussian
+     else:
+         # dtype needed for https://github.com/pytorch/pytorch/issues/32358
+         TPDF = torch.bartlett_window(time_size + 1, dtype=signal_scaled.dtype, device=signal_scaled.device)
+         TPDF = TPDF.repeat((channel_size + 1), 1)
+         signal_scaled_dis = signal_scaled + TPDF
+
+     quantised_signal_scaled = torch.round(signal_scaled_dis)
+     quantised_signal = quantised_signal_scaled / down_scaling
+
+     # unpack batch
+     return quantised_signal.reshape(shape[:-1] + quantised_signal.shape[-1:])
+
+
+ def dither(waveform: Tensor, density_function: str = "TPDF", noise_shaping: bool = False) -> Tensor:
+     r"""Apply dither
+
+     .. devices:: CPU CUDA
+
+     .. properties:: TorchScript
+
+     Dither increases the perceived dynamic range of audio stored at a
+     particular bit-depth by eliminating nonlinear truncation distortion
+     (i.e. adding minimally perceived noise to mask distortion caused by quantization).
+
+     Args:
+         waveform (Tensor): Tensor of audio of dimension (..., time)
+         density_function (str, optional):
+             The density function of a continuous random variable. One of
+             ``"TPDF"`` (Triangular Probability Density Function),
+             ``"RPDF"`` (Rectangular Probability Density Function) or
+             ``"GPDF"`` (Gaussian Probability Density Function) (Default: ``"TPDF"``).
+         noise_shaping (bool, optional): a filtering process that shapes the spectral
+             energy of quantisation error (Default: ``False``)
+
+     Returns:
+         Tensor: waveform dithered
+     """
+     dithered = _apply_probability_distribution(waveform, density_function=density_function)
+
+     if noise_shaping:
+         return _add_noise_shaping(dithered, waveform)
+     else:
+         return dithered
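A minimal usage sketch (not part of the diff). The helper above re-quantizes to a 16-bit grid, so the dithered output differs from the input by at most a few quantization steps:

    import math
    import torch
    import torchaudio.functional as F

    t = torch.arange(8000) / 8000
    x = 0.5 * torch.sin(2 * math.pi * 440 * t).unsqueeze(0)
    y = F.dither(x, density_function="TPDF", noise_shaping=True)
    print((y - x).abs().max())  # on the order of a 16-bit quantization step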
+
+
+ def equalizer_biquad(
+     waveform: Tensor,
+     sample_rate: int,
+     center_freq: float,
+     gain: float,
+     Q: float = 0.707,
+ ) -> Tensor:
+     r"""Design biquad peaking equalizer filter and perform filtering. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         center_freq (float or torch.Tensor): filter's central frequency
+         gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB
+         Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+     """
+     dtype = waveform.dtype
+     device = waveform.device
+     center_freq = torch.as_tensor(center_freq, dtype=dtype, device=device)
+     Q = torch.as_tensor(Q, dtype=dtype, device=device)
+     gain = torch.as_tensor(gain, dtype=dtype, device=device)
+
+     w0 = 2 * math.pi * center_freq / sample_rate
+     A = torch.exp(gain / 40.0 * math.log(10))
+     alpha = torch.sin(w0) / 2 / Q
+
+     b0 = 1 + alpha * A
+     b1 = -2 * torch.cos(w0)
+     b2 = 1 - alpha * A
+     a0 = 1 + alpha / A
+     a1 = -2 * torch.cos(w0)
+     a2 = 1 - alpha / A
+     return biquad(waveform, b0, b1, b2, a0, a1, a2)
+
+
+ def filtfilt(
+     waveform: Tensor,
+     a_coeffs: Tensor,
+     b_coeffs: Tensor,
+     clamp: bool = True,
+ ) -> Tensor:
+     r"""Apply an IIR filter forward and backward to a waveform.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Inspired by https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`. Must be normalized to -1 to 1.
+         a_coeffs (Tensor): denominator coefficients of difference equation of dimension of either
+             1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`.
+             Lower delay coefficients are first, e.g. ``[a0, a1, a2, ...]``.
+             Must be same size as b_coeffs (pad with 0's as necessary).
+         b_coeffs (Tensor): numerator coefficients of difference equation of dimension of either
+             1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`.
+             Lower delay coefficients are first, e.g. ``[b0, b1, b2, ...]``.
+             Must be same size as a_coeffs (pad with 0's as necessary).
+         clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``)
+
+     Returns:
+         Tensor: Waveform with dimension of either `(..., num_filters, time)` if ``a_coeffs`` and ``b_coeffs``
+             are 2D Tensors, or `(..., time)` otherwise.
+     """
+     forward_filtered = lfilter(waveform, a_coeffs, b_coeffs, clamp=False, batching=True)
+     backward_filtered = lfilter(
+         forward_filtered.flip(-1),
+         a_coeffs,
+         b_coeffs,
+         clamp=clamp,
+         batching=True,
+     ).flip(-1)
+     return backward_filtered
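A minimal usage sketch (not part of the diff). Filtering forward and then backward squares the magnitude response and cancels the phase shift, giving a zero-phase result:

    import torch
    import torchaudio.functional as F

    x = torch.rand(1, 8000) * 2 - 1
    a = torch.tensor([1.0, -0.5])   # one-pole lowpass: y[n] = 0.5 x[n] + 0.5 y[n-1]
    b = torch.tensor([0.5, 0.0])
    y = F.filtfilt(x, a_coeffs=a, b_coeffs=b)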
+
+
+ def flanger(
+     waveform: Tensor,
+     sample_rate: int,
+     delay: float = 0.0,
+     depth: float = 2.0,
+     regen: float = 0.0,
+     width: float = 71.0,
+     speed: float = 0.5,
+     phase: float = 25.0,
+     modulation: str = "sinusoidal",
+     interpolation: str = "linear",
+ ) -> Tensor:
+     r"""Apply a flanger effect to the audio. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., channel, time)`.
+             Max 4 channels allowed
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         delay (float, optional): desired delay in milliseconds (ms).
+             Allowed range of values is 0 to 30
+         depth (float, optional): desired delay depth in milliseconds (ms).
+             Allowed range of values is 0 to 10
+         regen (float, optional): desired regen (feedback gain) in dB.
+             Allowed range of values is -95 to 95
+         width (float, optional): desired width (delay gain) in dB.
+             Allowed range of values is 0 to 100
+         speed (float, optional): modulation speed in Hz.
+             Allowed range of values is 0.1 to 10
+         phase (float, optional): percentage phase-shift for multi-channel.
+             Allowed range of values is 0 to 100
+         modulation (str, optional): Use either "sinusoidal" or "triangular" modulation. (Default: ``sinusoidal``)
+         interpolation (str, optional): Use either "linear" or "quadratic" for delay-line interpolation.
+             (Default: ``linear``)
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., channel, time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+
+         - Scott Lehman, `Effects Explained`_.
+
+     .. _Effects Explained:
+         https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html
+     """
+
+     if modulation not in ("sinusoidal", "triangular"):
+         raise ValueError('Only "sinusoidal" or "triangular" modulation allowed')
+
+     if interpolation not in ("linear", "quadratic"):
+         raise ValueError('Only "linear" or "quadratic" interpolation allowed')
+
+     actual_shape = waveform.shape
+     device, dtype = waveform.device, waveform.dtype
+
+     if actual_shape[-2] > 4:
+         raise ValueError("Max 4 channels allowed")
+
+     # convert to 3D (batch, channels, time)
+     waveform = waveform.view(-1, actual_shape[-2], actual_shape[-1])
+
+     # Scaling
+     feedback_gain = regen / 100
+     delay_gain = width / 100
+     channel_phase = phase / 100
+     delay_min = delay / 1000
+     delay_depth = depth / 1000
+
+     n_channels = waveform.shape[-2]
+
+     if modulation == "sinusoidal":
+         wave_type = "SINE"
+     else:
+         wave_type = "TRIANGLE"
+
+     # Balance output:
+     in_gain = 1.0 / (1 + delay_gain)
+     delay_gain = delay_gain / (1 + delay_gain)
+
+     # Balance feedback loop:
+     delay_gain = delay_gain * (1 - abs(feedback_gain))
+
+     delay_buf_length = int((delay_min + delay_depth) * sample_rate + 0.5)
+     delay_buf_length = delay_buf_length + 2
+
+     delay_bufs = torch.zeros(waveform.shape[0], n_channels, delay_buf_length, dtype=dtype, device=device)
+     delay_last = torch.zeros(waveform.shape[0], n_channels, dtype=dtype, device=device)
+
+     lfo_length = int(sample_rate / speed)
+
+     table_min = math.floor(delay_min * sample_rate + 0.5)
+     table_max = delay_buf_length - 2.0
+
+     lfo = _generate_wave_table(
+         wave_type=wave_type,
+         data_type="FLOAT",
+         table_size=lfo_length,
+         min=float(table_min),
+         max=float(table_max),
+         phase=3 * math.pi / 2,
+         device=device,
+     )
+
+     output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device)
+
+     delay_buf_pos = 0
+     lfo_pos = 0
+     channel_idxs = torch.arange(0, n_channels, device=device)
+
+     for i in range(waveform.shape[-1]):
+
+         delay_buf_pos = (delay_buf_pos + delay_buf_length - 1) % delay_buf_length
+
+         cur_channel_phase = (channel_idxs * lfo_length * channel_phase + 0.5).to(torch.int64)
+         delay_tensor = lfo[(lfo_pos + cur_channel_phase) % lfo_length]
+         frac_delay = torch.frac(delay_tensor)
+         delay_tensor = torch.floor(delay_tensor)
+
+         int_delay = delay_tensor.to(torch.int64)
+
+         temp = waveform[:, :, i]
+
+         delay_bufs[:, :, delay_buf_pos] = temp + delay_last * feedback_gain
+
+         delayed_0 = delay_bufs[:, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length]
+
+         int_delay = int_delay + 1
+
+         delayed_1 = delay_bufs[:, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length]
+
+         int_delay = int_delay + 1
+
+         if interpolation == "linear":
+             delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay
+         else:
+             delayed_2 = delay_bufs[:, channel_idxs, (delay_buf_pos + int_delay) % delay_buf_length]
+
+             int_delay = int_delay + 1
+
+             delayed_2 = delayed_2 - delayed_0
+             delayed_1 = delayed_1 - delayed_0
+             a = delayed_2 * 0.5 - delayed_1
+             b = delayed_1 * 2 - delayed_2 * 0.5
+
+             delayed = delayed_0 + (a * frac_delay + b) * frac_delay
+
+         delay_last = delayed
+         output_waveform[:, :, i] = waveform[:, :, i] * in_gain + delayed * delay_gain
+
+         lfo_pos = (lfo_pos + 1) % lfo_length
+
+     return output_waveform.clamp(min=-1, max=1).view(actual_shape)
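A minimal usage sketch (not part of the diff). Note the sample-by-sample Python loop above, so short inputs are advisable on CPU:

    import torch
    import torchaudio.functional as F

    sample_rate = 44100
    stereo = torch.rand(2, 8000) * 2 - 1   # (channel, time), at most 4 channels
    swept = F.flanger(stereo, sample_rate, delay=0.0, depth=2.0, speed=0.5)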
+
+
+ def gain(waveform: Tensor, gain_db: float = 1.0) -> Tensor:
+     r"""Apply amplification or attenuation to the whole waveform.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): Tensor of audio of dimension (..., time).
+         gain_db (float, optional): Gain adjustment in decibels (dB) (Default: ``1.0``).
+
+     Returns:
+         Tensor: the whole waveform amplified by gain_db.
+     """
+     if gain_db == 0:
+         return waveform
+
+     ratio = 10 ** (gain_db / 20)
+
+     return waveform * ratio
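A quick worked example (not part of the diff): 6 dB corresponds to a linear ratio of 10 ** (6 / 20) ≈ 1.995, so every sample is roughly doubled.

    import torch
    import torchaudio.functional as F

    x = torch.rand(1, 100) * 2 - 1
    y = F.gain(x, gain_db=6.0)
    print(torch.allclose(y, x * 10 ** (6 / 20)))  # True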
+
+
+ def highpass_biquad(waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707) -> Tensor:
+     r"""Design biquad highpass filter and perform filtering. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         cutoff_freq (float or torch.Tensor): filter cutoff frequency
+         Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
+
+     Returns:
+         Tensor: Waveform dimension of `(..., time)`
+     """
+     dtype = waveform.dtype
+     device = waveform.device
+     cutoff_freq = torch.as_tensor(cutoff_freq, dtype=dtype, device=device)
+     Q = torch.as_tensor(Q, dtype=dtype, device=device)
+
+     w0 = 2 * math.pi * cutoff_freq / sample_rate
+     alpha = torch.sin(w0) / 2.0 / Q
+
+     b0 = (1 + torch.cos(w0)) / 2
+     b1 = -1 - torch.cos(w0)
+     b2 = b0
+     a0 = 1 + alpha
+     a1 = -2 * torch.cos(w0)
+     a2 = 1 - alpha
+     return biquad(waveform, b0, b1, b2, a0, a1, a2)
+
+
+ def _lfilter_core_generic_loop(input_signal_windows: Tensor, a_coeffs_flipped: Tensor, padded_output_waveform: Tensor):
+     n_order = a_coeffs_flipped.size(1)
+     a_coeffs_flipped = a_coeffs_flipped.unsqueeze(2)
+     for i_sample, o0 in enumerate(input_signal_windows.permute(2, 0, 1)):
+         windowed_output_signal = padded_output_waveform[:, :, i_sample : i_sample + n_order]
+         o0 -= (windowed_output_signal.transpose(0, 1) @ a_coeffs_flipped)[..., 0].t()
+         padded_output_waveform[:, :, i_sample + n_order - 1] = o0
+
+
+ if _IS_TORCHAUDIO_EXT_AVAILABLE:
+     _lfilter_core_cpu_loop = torch.ops.torchaudio._lfilter_core_loop
+ else:
+     _lfilter_core_cpu_loop = _lfilter_core_generic_loop
+
+
+ def _lfilter_core(
+     waveform: Tensor,
+     a_coeffs: Tensor,
+     b_coeffs: Tensor,
+ ) -> Tensor:
+
+     if a_coeffs.size() != b_coeffs.size():
+         raise ValueError(
+             "Expected coeffs to be the same size. "
+             f"Found a_coeffs size: {a_coeffs.size()}, b_coeffs size: {b_coeffs.size()}"
+         )
+     if waveform.ndim != 3:
+         raise ValueError(f"Expected waveform to be 3 dimensional. Found: {waveform.ndim}")
+     if not (waveform.device == a_coeffs.device == b_coeffs.device):
+         raise ValueError(
+             "Expected waveform and coeffs to be on the same device. "
+             f"Found: waveform device: {waveform.device}, a_coeffs device: {a_coeffs.device}, "
+             f"b_coeffs device: {b_coeffs.device}"
+         )
+
+     n_batch, n_channel, n_sample = waveform.size()
+     n_order = a_coeffs.size(1)
+     if n_order <= 0:
+         raise ValueError(f"Expected n_order to be positive. Found: {n_order}")
+
+     # Pad the input and create output
+
+     padded_waveform = torch.nn.functional.pad(waveform, [n_order - 1, 0])
+     padded_output_waveform = torch.zeros_like(padded_waveform)
+
+     # Set up the coefficients matrix
+     # Flip coefficients' order
+     a_coeffs_flipped = a_coeffs.flip(1)
+     b_coeffs_flipped = b_coeffs.flip(1)
+
+     # calculate windowed_input_signal in parallel using convolution
+     input_signal_windows = torch.nn.functional.conv1d(padded_waveform, b_coeffs_flipped.unsqueeze(1), groups=n_channel)
+
+     input_signal_windows.div_(a_coeffs[:, :1])
+     a_coeffs_flipped.div_(a_coeffs[:, :1])
+
+     if (
+         input_signal_windows.device == torch.device("cpu")
+         and a_coeffs_flipped.device == torch.device("cpu")
+         and padded_output_waveform.device == torch.device("cpu")
+     ):
+         _lfilter_core_cpu_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
+     else:
+         _lfilter_core_generic_loop(input_signal_windows, a_coeffs_flipped, padded_output_waveform)
+
+     output = padded_output_waveform[:, :, n_order - 1 :]
+     return output
+
+
+ if _IS_TORCHAUDIO_EXT_AVAILABLE:
+     _lfilter = torch.ops.torchaudio._lfilter
+ else:
+     _lfilter = _lfilter_core
+
+
+ def lfilter(waveform: Tensor, a_coeffs: Tensor, b_coeffs: Tensor, clamp: bool = True, batching: bool = True) -> Tensor:
+     r"""Perform an IIR filter by evaluating difference equation, using differentiable implementation
+     developed separately by *Yu et al.* :cite:`ismir_YuF23` and *Forgione et al.* :cite:`forgione2021dynonet`.
+     The gradients of ``a_coeffs`` are computed based on a faster algorithm from :cite:`ycy2024diffapf`.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Note:
+         To avoid numerical problems, small filter order is preferred.
+         Using double precision could also minimize numerical precision errors.
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`. Must be normalized to -1 to 1.
+         a_coeffs (Tensor): denominator coefficients of difference equation of dimension of either
+             1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`.
+             Lower delay coefficients are first, e.g. ``[a0, a1, a2, ...]``.
+             Must be same size as b_coeffs (pad with 0's as necessary).
+         b_coeffs (Tensor): numerator coefficients of difference equation of dimension of either
+             1D with shape `(num_order + 1)` or 2D with shape `(num_filters, num_order + 1)`.
+             Lower delay coefficients are first, e.g. ``[b0, b1, b2, ...]``.
+             Must be same size as a_coeffs (pad with 0's as necessary).
+         clamp (bool, optional): If ``True``, clamp the output signal to be in the range [-1, 1] (Default: ``True``)
+         batching (bool, optional): Effective only when coefficients are 2D. If ``True``, then waveform should be at
+             least 2D, and the size of second axis from last should equal ``num_filters``.
+             The output can be expressed as ``output[..., i, :] = lfilter(waveform[..., i, :],
+             a_coeffs[i], b_coeffs[i], clamp=clamp, batching=False)``. (Default: ``True``)
+
+     Returns:
+         Tensor: Waveform with dimension of either `(..., num_filters, time)` if ``a_coeffs`` and ``b_coeffs``
+             are 2D Tensors, or `(..., time)` otherwise.
+     """
+     if a_coeffs.size() != b_coeffs.size():
+         raise ValueError(
+             "Expected coeffs to be the same size. "
+             f"Found: a_coeffs size: {a_coeffs.size()}, b_coeffs size: {b_coeffs.size()}"
+         )
+     if a_coeffs.ndim > 2:
+         raise ValueError(f"Expected coeffs to be 1D or 2D. Found: {a_coeffs.ndim} dimensions")
+
+     if a_coeffs.ndim > 1:
+         if batching:
+             if waveform.ndim <= 0:
+                 raise ValueError(f"Expected waveform to have a positive number of dimensions. Found: {waveform.ndim}")
+             if waveform.shape[-2] != a_coeffs.shape[0]:
+                 raise ValueError(
+                     "Expected number of batches in waveform and coeffs to be the same. "
+                     f"Found: coeffs batches: {a_coeffs.shape[0]}, waveform batches: {waveform.shape[-2]}"
+                 )
+         else:
+             waveform = torch.stack([waveform] * a_coeffs.shape[0], -2)
+     else:
+         a_coeffs = a_coeffs.unsqueeze(0)
+         b_coeffs = b_coeffs.unsqueeze(0)
+
+     # pack batch
+     shape = waveform.size()
+     waveform = waveform.reshape(-1, a_coeffs.shape[0], shape[-1])
+     output = _lfilter(waveform, a_coeffs, b_coeffs)
+
+     if clamp:
+         output = torch.clamp(output, min=-1.0, max=1.0)
+
+     # unpack batch
+     output = output.reshape(shape[:-1] + output.shape[-1:])
+
+     return output
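A minimal usage sketch (not part of the diff), covering both the 1D and the batched 2D coefficient forms:

    import torch
    import torchaudio.functional as F

    x = torch.rand(1, 8000) * 2 - 1
    a = torch.tensor([1.0, -0.5])   # one-pole lowpass: y[n] = 0.5 x[n] + 0.5 y[n-1]
    b = torch.tensor([0.5, 0.0])
    y = F.lfilter(x, a_coeffs=a, b_coeffs=b)

    # 2D coefficients run a bank of filters; with batching=True the waveform's
    # second-to-last axis must match the number of filters.
    a2 = torch.stack([a, a])
    b2 = torch.stack([b, b])
    y2 = F.lfilter(torch.rand(2, 8000) * 2 - 1, a2, b2, batching=True)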
+
+
+ def lowpass_biquad(waveform: Tensor, sample_rate: int, cutoff_freq: float, Q: float = 0.707) -> Tensor:
+     r"""Design biquad lowpass filter and perform filtering. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (torch.Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         cutoff_freq (float or torch.Tensor): filter cutoff frequency
+         Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``)
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+     """
+     dtype = waveform.dtype
+     device = waveform.device
+     cutoff_freq = torch.as_tensor(cutoff_freq, dtype=dtype, device=device)
+     Q = torch.as_tensor(Q, dtype=dtype, device=device)
+
+     w0 = 2 * math.pi * cutoff_freq / sample_rate
+     alpha = torch.sin(w0) / 2 / Q
+
+     b0 = (1 - torch.cos(w0)) / 2
+     b1 = 1 - torch.cos(w0)
+     b2 = b0
+     a0 = 1 + alpha
+     a1 = -2 * torch.cos(w0)
+     a2 = 1 - alpha
+     return biquad(waveform, b0, b1, b2, a0, a1, a2)
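A minimal sketch (not part of the diff) pairing this with highpass_biquad above to split a signal into two bands around a shared cutoff:

    import torch
    import torchaudio.functional as F

    sample_rate = 44100
    x = torch.rand(1, sample_rate) * 2 - 1
    low = F.lowpass_biquad(x, sample_rate, cutoff_freq=1000.0)
    high = F.highpass_biquad(x, sample_rate, cutoff_freq=1000.0)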
+
+
+ def _overdrive_core_loop_generic(
+     waveform: Tensor, temp: Tensor, last_in: Tensor, last_out: Tensor, output_waveform: Tensor
+ ):
+     for i in range(waveform.shape[-1]):
+         last_out = temp[:, i] - last_in + 0.995 * last_out
+         last_in = temp[:, i]
+         output_waveform[:, i] = waveform[:, i] * 0.5 + last_out * 0.75
+
+
+ if _IS_TORCHAUDIO_EXT_AVAILABLE:
+     _overdrive_core_loop_cpu = torch.ops.torchaudio._overdrive_core_loop
+ else:
+     _overdrive_core_loop_cpu = _overdrive_core_loop_generic
+
+
+ def overdrive(waveform: Tensor, gain: float = 20, colour: float = 20) -> Tensor:
+     r"""Apply an overdrive effect to the audio. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     This effect applies a nonlinear distortion to the audio signal.
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         gain (float, optional): desired gain at the boost (or attenuation) in dB.
+             Allowed range of values is 0 to 100
+         colour (float, optional): controls the amount of even harmonic content in the over-driven output.
+             Allowed range of values is 0 to 100
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+     """
+     actual_shape = waveform.shape
+     device, dtype = waveform.device, waveform.dtype
+
+     # convert to 2D (.., time)
+     waveform = waveform.view(-1, actual_shape[-1])
+
+     gain = _dB2Linear(gain)
+     colour = colour / 200
+     last_in = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device)
+     last_out = torch.zeros(waveform.shape[:-1], dtype=dtype, device=device)
+
+     temp = waveform * gain + colour
+
+     mask1 = temp < -1
+     temp[mask1] = torch.tensor(-2.0 / 3.0, dtype=dtype, device=device)
+     # Wrapping the constant with Tensor is required for Torchscript
+
+     mask2 = temp > 1
+     temp[mask2] = torch.tensor(2.0 / 3.0, dtype=dtype, device=device)
+
+     mask3 = ~mask1 & ~mask2
+     temp[mask3] = temp[mask3] - (temp[mask3] ** 3) * (1.0 / 3)
+
+     output_waveform = torch.zeros_like(waveform, dtype=dtype, device=device)
+
+     # Uses CPU optimized loop function if available for CPU device
+     if device == torch.device("cpu"):
+         _overdrive_core_loop_cpu(waveform, temp, last_in, last_out, output_waveform)
+     else:
+         _overdrive_core_loop_generic(waveform, temp, last_in, last_out, output_waveform)
+
+     return output_waveform.clamp(min=-1, max=1).view(actual_shape)
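A minimal usage sketch (not part of the diff): the cubic soft-clipping stage above saturates the boosted signal, so a clean sine comes out with added harmonics.

    import math
    import torch
    import torchaudio.functional as F

    t = torch.arange(8000) / 8000
    x = 0.5 * torch.sin(2 * math.pi * 220 * t).unsqueeze(0)
    crunchy = F.overdrive(x, gain=30.0, colour=40.0)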
+
+
+ def phaser(
+     waveform: Tensor,
+     sample_rate: int,
+     gain_in: float = 0.4,
+     gain_out: float = 0.74,
+     delay_ms: float = 3.0,
+     decay: float = 0.4,
+     mod_speed: float = 0.5,
+     sinusoidal: bool = True,
+ ) -> Tensor:
+     r"""Apply a phasing effect to the audio. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         gain_in (float, optional): desired input gain at the boost (or attenuation) in dB.
+             Allowed range of values is 0 to 1
+         gain_out (float, optional): desired output gain at the boost (or attenuation) in dB.
+             Allowed range of values is 0 to 1e9
+         delay_ms (float, optional): desired delay in milliseconds.
+             Allowed range of values is 0 to 5.0
+         decay (float, optional): desired decay relative to gain-in.
+             Allowed range of values is 0 to 0.99
+         mod_speed (float, optional): modulation speed in Hz.
+             Allowed range of values is 0.1 to 2
+         sinusoidal (bool, optional): If ``True``, uses sinusoidal modulation (preferable for multiple instruments).
+             If ``False``, uses triangular modulation (gives single instruments a sharper phasing effect)
+             (Default: ``True``)
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+         - Scott Lehman, `Effects Explained`_.
+
+     .. _Effects Explained:
+         https://web.archive.org/web/20051125072557/http://www.harmony-central.com/Effects/effects-explained.html
+     """
+     actual_shape = waveform.shape
+     device, dtype = waveform.device, waveform.dtype
+
+     # convert to 2D (channels, time)
+     waveform = waveform.view(-1, actual_shape[-1])
+
+     delay_buf_len = int((delay_ms * 0.001 * sample_rate) + 0.5)
+     delay_buf = torch.zeros(waveform.shape[0], delay_buf_len, dtype=dtype, device=device)
+
+     mod_buf_len = int(sample_rate / mod_speed + 0.5)
+
+     if sinusoidal:
+         wave_type = "SINE"
+     else:
+         wave_type = "TRIANGLE"
+
+     mod_buf = _generate_wave_table(
+         wave_type=wave_type,
+         data_type="INT",
+         table_size=mod_buf_len,
+         min=1.0,
+         max=float(delay_buf_len),
+         phase=math.pi / 2,
+         device=device,
+     )
+
+     delay_pos = 0
+     mod_pos = 0
+
+     output_waveform_pre_gain_list = []
+     waveform = waveform * gain_in
+     delay_buf = delay_buf * decay
+     waveform_list = [waveform[:, i] for i in range(waveform.size(1))]
+     delay_buf_list = [delay_buf[:, i] for i in range(delay_buf.size(1))]
+     mod_buf_list = [mod_buf[i] for i in range(mod_buf.size(0))]
+
+     for i in range(waveform.shape[-1]):
+         idx = int((delay_pos + mod_buf_list[mod_pos]) % delay_buf_len)
+         mod_pos = (mod_pos + 1) % mod_buf_len
+         delay_pos = (delay_pos + 1) % delay_buf_len
+         temp = (waveform_list[i]) + (delay_buf_list[idx])
+         delay_buf_list[delay_pos] = temp * decay
+         output_waveform_pre_gain_list.append(temp)
+
+     output_waveform = torch.stack(output_waveform_pre_gain_list, dim=1).to(dtype=dtype, device=device)
+     output_waveform.mul_(gain_out)
+
+     return output_waveform.clamp(min=-1, max=1).view(actual_shape)
+
+
+ def riaa_biquad(waveform: Tensor, sample_rate: int) -> Tensor:
+     r"""Apply RIAA vinyl playback equalization. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz).
+             Allowed sample rates in Hz: ``44100``, ``48000``, ``88200``, ``96000``
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+         - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
+     """
+
+     if sample_rate == 44100:
+         zeros = [-0.2014898, 0.9233820]
+         poles = [0.7083149, 0.9924091]
+
+     elif sample_rate == 48000:
+         zeros = [-0.1766069, 0.9321590]
+         poles = [0.7396325, 0.9931330]
+
+     elif sample_rate == 88200:
+         zeros = [-0.1168735, 0.9648312]
+         poles = [0.8590646, 0.9964002]
+
+     elif sample_rate == 96000:
+         zeros = [-0.1141486, 0.9676817]
+         poles = [0.8699137, 0.9966946]
+
+     else:
+         raise ValueError("Sample rate must be 44.1k, 48k, 88.2k, or 96k")
+
+     # polynomial coefficients with roots zeros[0] and zeros[1]
+     b0 = 1.0
+     b1 = -(zeros[0] + zeros[1])
+     b2 = zeros[0] * zeros[1]
+
+     # polynomial coefficients with roots poles[0] and poles[1]
+     a0 = 1.0
+     a1 = -(poles[0] + poles[1])
+     a2 = poles[0] * poles[1]
+
+     # Normalize to 0dB at 1kHz
+     y = 2 * math.pi * 1000 / sample_rate
+     b_re = b0 + b1 * math.cos(-y) + b2 * math.cos(-2 * y)
+     a_re = a0 + a1 * math.cos(-y) + a2 * math.cos(-2 * y)
+     b_im = b1 * math.sin(-y) + b2 * math.sin(-2 * y)
+     a_im = a1 * math.sin(-y) + a2 * math.sin(-2 * y)
+     g = 1 / math.sqrt((b_re**2 + b_im**2) / (a_re**2 + a_im**2))
+
+     b0 *= g
+     b1 *= g
+     b2 *= g
+
+     return biquad(waveform, b0, b1, b2, a0, a1, a2)
1329
+
1330
+
1331
+ def treble_biquad(
+     waveform: Tensor,
+     sample_rate: int,
+     gain: float,
+     central_freq: float = 3000,
+     Q: float = 0.707,
+ ) -> Tensor:
+     r"""Design a treble tone-control effect. Similar to SoX implementation.
+
+     .. devices:: CPU CUDA
+
+     .. properties:: Autograd TorchScript
+
+     Args:
+         waveform (Tensor): audio waveform of dimension of `(..., time)`
+         sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz)
+         gain (float or torch.Tensor): desired gain at the boost (or attenuation) in dB.
+         central_freq (float or torch.Tensor, optional): central frequency (in Hz). (Default: ``3000``)
+         Q (float or torch.Tensor, optional): https://en.wikipedia.org/wiki/Q_factor (Default: ``0.707``).
+
+     Returns:
+         Tensor: Waveform of dimension of `(..., time)`
+
+     Reference:
+         - http://sox.sourceforge.net/sox.html
+         - https://www.w3.org/2011/audio/audio-eq-cookbook.html#APF
+     """
+     dtype = waveform.dtype
+     device = waveform.device
+     central_freq = torch.as_tensor(central_freq, dtype=dtype, device=device)
+     Q = torch.as_tensor(Q, dtype=dtype, device=device)
+     gain = torch.as_tensor(gain, dtype=dtype, device=device)
+
+     w0 = 2 * math.pi * central_freq / sample_rate
+     alpha = torch.sin(w0) / 2 / Q
+     A = torch.exp(gain / 40 * math.log(10))
+
+     temp1 = 2 * torch.sqrt(A) * alpha
+     temp2 = (A - 1) * torch.cos(w0)
+     temp3 = (A + 1) * torch.cos(w0)
+
+     b0 = A * ((A + 1) + temp2 + temp1)
+     b1 = -2 * A * ((A - 1) + temp3)
+     b2 = A * ((A + 1) + temp2 - temp1)
+     a0 = (A + 1) - temp2 + temp1
+     a1 = 2 * ((A - 1) - temp3)
+     a2 = (A + 1) - temp2 - temp1
+
+     return biquad(waveform, b0, b1, b2, a0, a1, a2)
+
+
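A minimal usage sketch for `treble_biquad` on synthetic audio; the values below are arbitrary:

    import torch
    import torchaudio.functional as F

    sample_rate = 44100
    waveform = torch.rand(2, sample_rate) * 2 - 1  # 1 s of stereo noise in [-1, 1)

    boosted = F.treble_biquad(waveform, sample_rate, gain=6.0)  # +6 dB shelf above ~3 kHz
    cut = F.treble_biquad(waveform, sample_rate, gain=-6.0, central_freq=4000, Q=0.6)

The coefficients are the standard high-shelf formulas from the Audio EQ Cookbook referenced in the docstring, with the shelf amplitude `A = 10^(gain/40)`.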
+ def _measure(
+     measure_len_ws: int,
+     samples: Tensor,
+     spectrum: Tensor,
+     noise_spectrum: Tensor,
+     spectrum_window: Tensor,
+     spectrum_start: int,
+     spectrum_end: int,
+     cepstrum_window: Tensor,
+     cepstrum_start: int,
+     cepstrum_end: int,
+     noise_reduction_amount: float,
+     measure_smooth_time_mult: float,
+     noise_up_time_mult: Tensor,
+     noise_down_time_mult: Tensor,
+     boot_count: int,
+ ) -> float:
+     device = samples.device
+
+     if spectrum.size(-1) != noise_spectrum.size(-1):
+         raise ValueError(
+             "Expected spectrum size to match noise spectrum size in final dimension. "
+             f"Found: spectrum size: {spectrum.size()}, noise_spectrum size: {noise_spectrum.size()}"
+         )
+
+     dft_len_ws = spectrum.size(-1)
+
+     dftBuf = torch.zeros(dft_len_ws, device=device)
+     dftBuf[:measure_len_ws] = samples * spectrum_window[:measure_len_ws]
+
+     # lsx_safe_rdft((int)p->dft_len_ws, 1, c->dftBuf);
+     _dftBuf = torch.fft.rfft(dftBuf)
+
+     mult: float = boot_count / (1.0 + boot_count) if boot_count >= 0 else measure_smooth_time_mult
+
+     _d = _dftBuf[spectrum_start:spectrum_end].abs()
+     spectrum[spectrum_start:spectrum_end].mul_(mult).add_(_d * (1 - mult))
+     _d = spectrum[spectrum_start:spectrum_end] ** 2
+
+     _zeros = torch.zeros(spectrum_end - spectrum_start, device=device)
+     _mult = (
+         _zeros
+         if boot_count >= 0
+         else torch.where(
+             _d > noise_spectrum[spectrum_start:spectrum_end],
+             noise_up_time_mult,  # if
+             noise_down_time_mult,  # else
+         )
+     )
+
+     noise_spectrum[spectrum_start:spectrum_end].mul_(_mult).add_(_d * (1 - _mult))
+     _d = torch.sqrt(
+         torch.max(
+             _zeros,
+             _d - noise_reduction_amount * noise_spectrum[spectrum_start:spectrum_end],
+         ),
+     )
+
+     _cepstrum_Buf: Tensor = torch.zeros(dft_len_ws >> 1, device=device)
+     _cepstrum_Buf[spectrum_start:spectrum_end] = _d * cepstrum_window
+     _cepstrum_Buf[spectrum_end : dft_len_ws >> 1].zero_()
+
+     # lsx_safe_rdft((int)p->dft_len_ws >> 1, 1, c->dftBuf);
+     _cepstrum_Buf = torch.fft.rfft(_cepstrum_Buf)
+
+     result: float = float(torch.sum(_cepstrum_Buf[cepstrum_start:cepstrum_end].abs().pow(2)))
+     result = math.log(result / (cepstrum_end - cepstrum_start)) if result > 0 else -math.inf
+     return max(0, 21 + result)
+
+
+ def vad(
+     waveform: Tensor,
+     sample_rate: int,
+     trigger_level: float = 7.0,
+     trigger_time: float = 0.25,
+     search_time: float = 1.0,
+     allowed_gap: float = 0.25,
+     pre_trigger_time: float = 0.0,
+     # Fine-tuning parameters
+     boot_time: float = 0.35,
+     noise_up_time: float = 0.1,
+     noise_down_time: float = 0.01,
+     noise_reduction_amount: float = 1.35,
+     measure_freq: float = 20.0,
+     measure_duration: Optional[float] = None,
+     measure_smooth_time: float = 0.4,
+     hp_filter_freq: float = 50.0,
+     lp_filter_freq: float = 6000.0,
+     hp_lifter_freq: float = 150.0,
+     lp_lifter_freq: float = 2000.0,
+ ) -> Tensor:
+ r"""Voice Activity Detector. Similar to SoX implementation.
1475
+
1476
+ .. devices:: CPU CUDA
1477
+
1478
+ .. properties:: TorchScript
1479
+
1480
+ Attempts to trim silence and quiet background sounds from the ends of recordings of speech.
1481
+ The algorithm currently uses a simple cepstral power measurement to detect voice,
1482
+ so may be fooled by other things, especially music.
1483
+
1484
+ The effect can trim only from the front of the audio,
1485
+ so in order to trim from the back, the reverse effect must also be used.
1486
+
1487
+ Args:
1488
+ waveform (Tensor): Tensor of audio of dimension `(channels, time)` or `(time)`
1489
+ Tensor of shape `(channels, time)` is treated as a multi-channel recording
1490
+ of the same event and the resulting output will be trimmed to the earliest
1491
+ voice activity in any channel.
1492
+ sample_rate (int): Sample rate of audio signal.
1493
+ trigger_level (float, optional): The measurement level used to trigger activity detection.
1494
+ This may need to be cahnged depending on the noise level, signal level,
1495
+ and other characteristics of the input audio. (Default: 7.0)
1496
+ trigger_time (float, optional): The time constant (in seconds)
1497
+ used to help ignore short bursts of sound. (Default: 0.25)
1498
+ search_time (float, optional): The amount of audio (in seconds)
1499
+ to search for quieter/shorter bursts of audio to include prior
1500
+ to the detected trigger point. (Default: 1.0)
1501
+ allowed_gap (float, optional): The allowed gap (in seconds) between
1502
+ quieter/shorter bursts of audio to include prior
1503
+ to the detected trigger point. (Default: 0.25)
1504
+ pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve
1505
+ before the trigger point and any found quieter/shorter bursts. (Default: 0.0)
1506
+ boot_time (float, optional) The algorithm (internally) uses adaptive noise
1507
+ estimation/reduction in order to detect the start of the wanted audio.
1508
+ This option sets the time for the initial noise estimate. (Default: 0.35)
1509
+ noise_up_time (float, optional) Time constant used by the adaptive noise estimator
1510
+ for when the noise level is increasing. (Default: 0.1)
1511
+ noise_down_time (float, optional) Time constant used by the adaptive noise estimator
1512
+ for when the noise level is decreasing. (Default: 0.01)
1513
+ noise_reduction_amount (float, optional) Amount of noise reduction to use in
1514
+ the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35)
1515
+ measure_freq (float, optional) Frequency of the algorithm's
1516
+ processing/measurements. (Default: 20.0)
1517
+ measure_duration: (float, optional) Measurement duration.
1518
+ (Default: Twice the measurement period; i.e. with overlap.)
1519
+ measure_smooth_time (float, optional) Time constant used to smooth
1520
+ spectral measurements. (Default: 0.4)
1521
+ hp_filter_freq (float, optional) "Brick-wall" frequency of high-pass filter applied
1522
+ at the input to the detector algorithm. (Default: 50.0)
1523
+ lp_filter_freq (float, optional) "Brick-wall" frequency of low-pass filter applied
1524
+ at the input to the detector algorithm. (Default: 6000.0)
1525
+ hp_lifter_freq (float, optional) "Brick-wall" frequency of high-pass lifter used
1526
+ in the detector algorithm. (Default: 150.0)
1527
+ lp_lifter_freq (float, optional) "Brick-wall" frequency of low-pass lifter used
1528
+ in the detector algorithm. (Default: 2000.0)
1529
+
1530
+ Returns:
1531
+ Tensor: Tensor of audio of dimension `(..., time)`.
1532
+
1533
+ Reference:
1534
+ - http://sox.sourceforge.net/sox.html
1535
+ """
1536
+     device = waveform.device
+
+     if waveform.ndim > 2:
+         warnings.warn(
+             "Expected input tensor dimension of 1 for single channel"
+             f" or 2 for multi-channel. Got {waveform.ndim} instead. "
+             "Batch semantics is not supported. "
+             "Please refer to https://github.com/pytorch/audio/issues/1348"
+             " and https://github.com/pytorch/audio/issues/1468."
+         )
+
+     measure_duration: float = 2.0 / measure_freq if measure_duration is None else measure_duration
+
+     measure_len_ws = int(sample_rate * measure_duration + 0.5)
+     measure_len_ns = measure_len_ws
+     # for (dft_len_ws = 16; dft_len_ws < measure_len_ws; dft_len_ws <<= 1);
+     dft_len_ws = 16
+     while dft_len_ws < measure_len_ws:
+         dft_len_ws *= 2
+
+     measure_period_ns = int(sample_rate / measure_freq + 0.5)
+     measures_len = math.ceil(search_time * measure_freq)
+     search_pre_trigger_len_ns = measures_len * measure_period_ns
+     gap_len = int(allowed_gap * measure_freq + 0.5)
+
+     fixed_pre_trigger_len_ns = int(pre_trigger_time * sample_rate + 0.5)
+     samplesLen_ns = fixed_pre_trigger_len_ns + search_pre_trigger_len_ns + measure_len_ns
+
+     spectrum_window = torch.zeros(measure_len_ws, device=device)
+     for i in range(measure_len_ws):
+         # sox.h:741 define SOX_SAMPLE_MIN (sox_sample_t)SOX_INT_MIN(32)
+         spectrum_window[i] = 2.0 / math.sqrt(float(measure_len_ws))
+     # lsx_apply_hann(spectrum_window, (int)measure_len_ws);
+     spectrum_window *= torch.hann_window(measure_len_ws, device=device, dtype=torch.float)
+
+     spectrum_start: int = int(hp_filter_freq / sample_rate * dft_len_ws + 0.5)
+     spectrum_start: int = max(spectrum_start, 1)
+     spectrum_end: int = int(lp_filter_freq / sample_rate * dft_len_ws + 0.5)
+     spectrum_end: int = min(spectrum_end, dft_len_ws // 2)
+
+     cepstrum_window = torch.zeros(spectrum_end - spectrum_start, device=device)
+     for i in range(spectrum_end - spectrum_start):
+         cepstrum_window[i] = 2.0 / math.sqrt(float(spectrum_end - spectrum_start))
+     # lsx_apply_hann(cepstrum_window, (int)(spectrum_end - spectrum_start));
+     cepstrum_window *= torch.hann_window(spectrum_end - spectrum_start, device=device, dtype=torch.float)
+
+     cepstrum_start = math.ceil(sample_rate * 0.5 / lp_lifter_freq)
+     cepstrum_end = math.floor(sample_rate * 0.5 / hp_lifter_freq)
+     cepstrum_end = min(cepstrum_end, dft_len_ws // 4)
+
+     if cepstrum_end <= cepstrum_start:
+         raise ValueError(
+             "Expected cepstrum_start to be smaller than cepstrum_end. "
+             f"Found: cepstrum_start: {cepstrum_start}, cepstrum_end: {cepstrum_end}."
+         )
+
+     noise_up_time_mult = torch.tensor(math.exp(-1.0 / (noise_up_time * measure_freq)), device=device)
+     noise_down_time_mult = torch.tensor(math.exp(-1.0 / (noise_down_time * measure_freq)), device=device)
+     measure_smooth_time_mult = math.exp(-1.0 / (measure_smooth_time * measure_freq))
+     trigger_meas_time_mult = math.exp(-1.0 / (trigger_time * measure_freq))
+
+     boot_count_max = int(boot_time * measure_freq - 0.5)
+     boot_count = measures_index = flushedLen_ns = 0
+
+     # pack batch
+     shape = waveform.size()
+     waveform = waveform.view(-1, shape[-1])
+
+     n_channels, ilen = waveform.size()
+
+     mean_meas = torch.zeros(n_channels, device=device)
+     spectrum = torch.zeros(n_channels, dft_len_ws, device=device)
+     noise_spectrum = torch.zeros(n_channels, dft_len_ws, device=device)
+     measures = torch.zeros(n_channels, measures_len, device=device)
+
+     has_triggered: bool = False
+     num_measures_to_flush: int = 0
+
+     pos = 0
+     for pos in range(measure_len_ns, ilen, measure_period_ns):
+         for i in range(n_channels):
+             meas: float = _measure(
+                 measure_len_ws=measure_len_ws,
+                 samples=waveform[i, pos - measure_len_ws : pos],
+                 spectrum=spectrum[i],
+                 noise_spectrum=noise_spectrum[i],
+                 spectrum_window=spectrum_window,
+                 spectrum_start=spectrum_start,
+                 spectrum_end=spectrum_end,
+                 cepstrum_window=cepstrum_window,
+                 cepstrum_start=cepstrum_start,
+                 cepstrum_end=cepstrum_end,
+                 noise_reduction_amount=noise_reduction_amount,
+                 measure_smooth_time_mult=measure_smooth_time_mult,
+                 noise_up_time_mult=noise_up_time_mult,
+                 noise_down_time_mult=noise_down_time_mult,
+                 boot_count=boot_count,
+             )
+             measures[i, measures_index] = meas
+             mean_meas[i] = mean_meas[i] * trigger_meas_time_mult + meas * (1.0 - trigger_meas_time_mult)
+
+             has_triggered = has_triggered or (mean_meas[i] >= trigger_level)
+             if has_triggered:
+                 n: int = measures_len
+                 k: int = measures_index
+                 jTrigger: int = n
+                 jZero: int = n
+                 j: int = 0
+
+                 for j in range(n):
+                     if (measures[i, k] >= trigger_level) and (j <= jTrigger + gap_len):
+                         jZero = jTrigger = j
+                     elif (measures[i, k] == 0) and (jTrigger >= jZero):
+                         jZero = j
+                     k = (k + n - 1) % n
+                 j = min(j, jZero)
+                 # num_measures_to_flush = range_limit(j, num_measures_to_flush, n);
+                 num_measures_to_flush = min(max(num_measures_to_flush, j), n)
+             # end if has_triggered
+         # end for channel
+         measures_index += 1
+         measures_index = measures_index % measures_len
+         if boot_count >= 0:
+             boot_count = -1 if boot_count == boot_count_max else boot_count + 1
+
+         if has_triggered:
+             flushedLen_ns = (measures_len - num_measures_to_flush) * measure_period_ns
+             break
+     # end for window
+     if not has_triggered:
+         return waveform[..., :0].view(shape[:-1] + torch.Size([0]))
+
+     res = waveform[:, max(pos - samplesLen_ns + flushedLen_ns, 0) :]
+     # unpack batch
+     return res.view(shape[:-1] + res.shape[-1:])
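Since `vad` trims only leading silence, the docstring's suggestion to use the reverse effect translates to: trim, reverse, trim again, reverse back. A minimal sketch, where the file path is a placeholder:

    import torch
    import torchaudio
    import torchaudio.functional as F

    waveform, sample_rate = torchaudio.load("speech.wav")  # placeholder path

    front_trimmed = F.vad(waveform, sample_rate)  # trims leading silence only
    reversed_wav = torch.flip(front_trimmed, [-1])
    both_trimmed = torch.flip(F.vad(reversed_wav, sample_rate), [-1])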