torchaudio 2.9.1__cp311-cp311-manylinux_2_28_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. torchaudio/__init__.py +204 -0
  2. torchaudio/_extension/__init__.py +61 -0
  3. torchaudio/_extension/utils.py +133 -0
  4. torchaudio/_internal/__init__.py +10 -0
  5. torchaudio/_internal/module_utils.py +171 -0
  6. torchaudio/_torchcodec.py +340 -0
  7. torchaudio/compliance/__init__.py +5 -0
  8. torchaudio/compliance/kaldi.py +813 -0
  9. torchaudio/datasets/__init__.py +47 -0
  10. torchaudio/datasets/cmuarctic.py +157 -0
  11. torchaudio/datasets/cmudict.py +186 -0
  12. torchaudio/datasets/commonvoice.py +86 -0
  13. torchaudio/datasets/dr_vctk.py +121 -0
  14. torchaudio/datasets/fluentcommands.py +108 -0
  15. torchaudio/datasets/gtzan.py +1118 -0
  16. torchaudio/datasets/iemocap.py +147 -0
  17. torchaudio/datasets/librilight_limited.py +111 -0
  18. torchaudio/datasets/librimix.py +133 -0
  19. torchaudio/datasets/librispeech.py +174 -0
  20. torchaudio/datasets/librispeech_biasing.py +189 -0
  21. torchaudio/datasets/libritts.py +168 -0
  22. torchaudio/datasets/ljspeech.py +107 -0
  23. torchaudio/datasets/musdb_hq.py +139 -0
  24. torchaudio/datasets/quesst14.py +136 -0
  25. torchaudio/datasets/snips.py +157 -0
  26. torchaudio/datasets/speechcommands.py +183 -0
  27. torchaudio/datasets/tedlium.py +218 -0
  28. torchaudio/datasets/utils.py +54 -0
  29. torchaudio/datasets/vctk.py +143 -0
  30. torchaudio/datasets/voxceleb1.py +309 -0
  31. torchaudio/datasets/yesno.py +89 -0
  32. torchaudio/functional/__init__.py +130 -0
  33. torchaudio/functional/_alignment.py +128 -0
  34. torchaudio/functional/filtering.py +1685 -0
  35. torchaudio/functional/functional.py +2505 -0
  36. torchaudio/lib/__init__.py +0 -0
  37. torchaudio/lib/_torchaudio.so +0 -0
  38. torchaudio/lib/libtorchaudio.so +0 -0
  39. torchaudio/models/__init__.py +85 -0
  40. torchaudio/models/_hdemucs.py +1008 -0
  41. torchaudio/models/conformer.py +293 -0
  42. torchaudio/models/conv_tasnet.py +330 -0
  43. torchaudio/models/decoder/__init__.py +64 -0
  44. torchaudio/models/decoder/_ctc_decoder.py +568 -0
  45. torchaudio/models/decoder/_cuda_ctc_decoder.py +187 -0
  46. torchaudio/models/deepspeech.py +84 -0
  47. torchaudio/models/emformer.py +884 -0
  48. torchaudio/models/rnnt.py +816 -0
  49. torchaudio/models/rnnt_decoder.py +339 -0
  50. torchaudio/models/squim/__init__.py +11 -0
  51. torchaudio/models/squim/objective.py +326 -0
  52. torchaudio/models/squim/subjective.py +150 -0
  53. torchaudio/models/tacotron2.py +1046 -0
  54. torchaudio/models/wav2letter.py +72 -0
  55. torchaudio/models/wav2vec2/__init__.py +45 -0
  56. torchaudio/models/wav2vec2/components.py +1167 -0
  57. torchaudio/models/wav2vec2/model.py +1579 -0
  58. torchaudio/models/wav2vec2/utils/__init__.py +7 -0
  59. torchaudio/models/wav2vec2/utils/import_fairseq.py +213 -0
  60. torchaudio/models/wav2vec2/utils/import_huggingface.py +134 -0
  61. torchaudio/models/wav2vec2/wavlm_attention.py +214 -0
  62. torchaudio/models/wavernn.py +409 -0
  63. torchaudio/pipelines/__init__.py +102 -0
  64. torchaudio/pipelines/_source_separation_pipeline.py +109 -0
  65. torchaudio/pipelines/_squim_pipeline.py +156 -0
  66. torchaudio/pipelines/_tts/__init__.py +16 -0
  67. torchaudio/pipelines/_tts/impl.py +385 -0
  68. torchaudio/pipelines/_tts/interface.py +255 -0
  69. torchaudio/pipelines/_tts/utils.py +230 -0
  70. torchaudio/pipelines/_wav2vec2/__init__.py +0 -0
  71. torchaudio/pipelines/_wav2vec2/aligner.py +87 -0
  72. torchaudio/pipelines/_wav2vec2/impl.py +1699 -0
  73. torchaudio/pipelines/_wav2vec2/utils.py +346 -0
  74. torchaudio/pipelines/rnnt_pipeline.py +380 -0
  75. torchaudio/transforms/__init__.py +78 -0
  76. torchaudio/transforms/_multi_channel.py +467 -0
  77. torchaudio/transforms/_transforms.py +2138 -0
  78. torchaudio/utils/__init__.py +4 -0
  79. torchaudio/utils/download.py +89 -0
  80. torchaudio/version.py +2 -0
  81. torchaudio-2.9.1.dist-info/METADATA +133 -0
  82. torchaudio-2.9.1.dist-info/RECORD +85 -0
  83. torchaudio-2.9.1.dist-info/WHEEL +5 -0
  84. torchaudio-2.9.1.dist-info/licenses/LICENSE +25 -0
  85. torchaudio-2.9.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,813 @@
1
import math
from typing import Tuple

import torch
import torchaudio
from torch import Tensor

# Public, Kaldi-compatible API of this module.
__all__ = [
    "get_mel_banks",
    "inverse_mel_scale",
    "inverse_mel_scale_scalar",
    "mel_scale",
    "mel_scale_scalar",
    "spectrogram",
    "fbank",
    "mfcc",
    "vtln_warp_freq",
    "vtln_warp_mel_freq",
]

# numeric_limits<float>::epsilon() 1.1920928955078125e-07
# Kept as a float32 tensor to mirror Kaldi's C++ float epsilon.
EPSILON = torch.tensor(torch.finfo(torch.float).eps)
# 1 milliseconds = 0.001 seconds
MILLISECONDS_TO_SECONDS = 0.001

# window types (string values are the ones Kaldi's tools accept)
HAMMING = "hamming"
HANNING = "hanning"
POVEY = "povey"
RECTANGULAR = "rectangular"
BLACKMAN = "blackman"
WINDOWS = [HAMMING, HANNING, POVEY, RECTANGULAR, BLACKMAN]
33
+
34
+
35
def _get_epsilon(device, dtype):
    # Move the module-level EPSILON constant onto the caller's device/dtype
    # so it can participate in tensor ops without implicit casts.
    return EPSILON.to(device=device, dtype=dtype)
37
+
38
+
39
+ def _next_power_of_2(x: int) -> int:
40
+ r"""Returns the smallest power of 2 that is greater than x"""
41
+ return 1 if x == 0 else 2 ** (x - 1).bit_length()
42
+
43
+
44
def _get_strided(waveform: Tensor, window_size: int, window_shift: int, snip_edges: bool) -> Tensor:
    r"""Given a waveform (1D tensor of size ``num_samples``), it returns a 2D tensor (m, ``window_size``)
    representing how the window is shifted along the waveform. Each row is a frame.

    Args:
        waveform (Tensor): Tensor of size ``num_samples``
        window_size (int): Frame length
        window_shift (int): Frame shift
        snip_edges (bool): If True, end effects will be handled by outputting only frames that completely fit
            in the file, and the number of frames depends on the frame_length. If False, the number of frames
            depends only on the frame_shift, and we reflect the data at the ends.

    Returns:
        Tensor: 2D tensor of size (m, ``window_size``) where each row is a frame
    """
    assert waveform.dim() == 1
    num_samples = waveform.size(0)
    # Strides for as_strided below: rows advance by window_shift samples,
    # columns by one sample (overlapping frames share storage, no copy).
    strides = (window_shift * waveform.stride(0), waveform.stride(0))

    if snip_edges:
        if num_samples < window_size:
            # Not even one full frame fits; Kaldi returns an empty feature matrix.
            return torch.empty((0, 0), dtype=waveform.dtype, device=waveform.device)
        else:
            m = 1 + (num_samples - window_size) // window_shift
    else:
        # Reflect-pad both ends so the frame count depends only on window_shift.
        reversed_waveform = torch.flip(waveform, [0])
        m = (num_samples + (window_shift // 2)) // window_shift
        pad = window_size // 2 - window_shift // 2
        pad_right = reversed_waveform
        if pad > 0:
            # torch.nn.functional.pad returns [2,1,0,1,2] for 'reflect'
            # but we want [2, 1, 0, 0, 1, 2] (the edge sample is repeated),
            # so the reflection is built manually from the flipped waveform.
            pad_left = reversed_waveform[-pad:]
            waveform = torch.cat((pad_left, waveform, pad_right), dim=0)
        else:
            # pad is negative so we want to trim the waveform at the front
            waveform = torch.cat((waveform[-pad:], pad_right), dim=0)

    sizes = (m, window_size)
    return waveform.as_strided(sizes, strides)
84
+
85
+
86
def _feature_window_function(
    window_type: str,
    window_size: int,
    blackman_coeff: float,
    device: torch.device,
    dtype: torch.dtype,
) -> Tensor:
    r"""Returns a window function with the given type and size.

    Args:
        window_type (str): One of the names in ``WINDOWS``.
        window_size (int): Number of samples in the window.
        blackman_coeff (float): Constant coefficient for the generalized
            Blackman window (only used when ``window_type == BLACKMAN``).
        device (torch.device): Device of the returned tensor.
        dtype (torch.dtype): Dtype of the returned tensor.

    Returns:
        Tensor: 1D window of size (``window_size``,).

    Raises:
        ValueError: If ``window_type`` is not a supported window name.
    """
    if window_type == HANNING:
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
    elif window_type == HAMMING:
        # Kaldi uses the classic 0.54/0.46 Hamming coefficients.
        return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype)
    elif window_type == POVEY:
        # like hanning but goes to zero at edges
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85)
    elif window_type == RECTANGULAR:
        return torch.ones(window_size, device=device, dtype=dtype)
    elif window_type == BLACKMAN:
        a = 2 * math.pi / (window_size - 1)
        window_function = torch.arange(window_size, device=device, dtype=dtype)
        # can't use torch.blackman_window as they use different coefficients
        return (
            blackman_coeff
            - 0.5 * torch.cos(a * window_function)
            + (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)
        ).to(device=device, dtype=dtype)
    else:
        # ValueError (a subclass of Exception) is the idiomatic error for a bad
        # argument value; callers catching Exception are unaffected.
        raise ValueError("Invalid window type " + window_type)
114
+
115
+
116
+ def _get_log_energy(strided_input: Tensor, epsilon: Tensor, energy_floor: float) -> Tensor:
117
+ r"""Returns the log energy of size (m) for a strided_input (m,*)"""
118
+ device, dtype = strided_input.device, strided_input.dtype
119
+ log_energy = torch.max(strided_input.pow(2).sum(1), epsilon).log() # size (m)
120
+ if energy_floor == 0.0:
121
+ return log_energy
122
+ return torch.max(log_energy, torch.tensor(math.log(energy_floor), device=device, dtype=dtype))
123
+
124
+
125
def _get_waveform_and_window_properties(
    waveform: Tensor,
    channel: int,
    sample_frequency: float,
    frame_shift: float,
    frame_length: float,
    round_to_power_of_two: bool,
    preemphasis_coefficient: float,
) -> Tuple[Tensor, int, int, int]:
    r"""Selects one channel and derives window sizes (in samples) from the
    millisecond frame parameters.

    Returns:
        (Tensor, int, int, int): mono waveform of size (n), window shift,
        window size, and padded window size, all in samples.
    """
    channel = max(channel, 0)
    assert channel < waveform.size(0), "Invalid channel {} for size {}".format(channel, waveform.size(0))
    waveform = waveform[channel, :]  # size (n)

    # Convert millisecond frame parameters into sample counts.
    window_shift = int(sample_frequency * frame_shift * MILLISECONDS_TO_SECONDS)
    window_size = int(sample_frequency * frame_length * MILLISECONDS_TO_SECONDS)
    if round_to_power_of_two:
        padded_window_size = _next_power_of_2(window_size)
    else:
        padded_window_size = window_size

    # Sanity checks mirroring Kaldi's option validation.
    assert 2 <= window_size <= len(waveform), "choose a window size {} that is [2, {}]".format(
        window_size, len(waveform)
    )
    assert 0 < window_shift, "`window_shift` must be greater than 0"
    assert padded_window_size % 2 == 0, (
        "the padded `window_size` must be divisible by two." " use `round_to_power_of_two` or change `frame_length`"
    )
    assert 0.0 <= preemphasis_coefficient <= 1.0, "`preemphasis_coefficient` must be between [0,1]"
    assert sample_frequency > 0, "`sample_frequency` must be greater than zero"
    return waveform, window_shift, window_size, padded_window_size
152
+
153
+
154
def _get_window(
    waveform: Tensor,
    padded_window_size: int,
    window_size: int,
    window_shift: int,
    window_type: str,
    blackman_coeff: float,
    snip_edges: bool,
    raw_energy: bool,
    energy_floor: float,
    dither: float,
    remove_dc_offset: bool,
    preemphasis_coefficient: float,
) -> Tuple[Tensor, Tensor]:
    r"""Gets a window and its log energy.

    Processing order matches Kaldi: frame extraction -> dither -> DC removal
    -> (raw energy) -> preemphasis -> windowing -> zero padding ->
    (non-raw energy).

    Returns:
        (Tensor, Tensor): strided_input of size (m, ``padded_window_size``) and signal_log_energy of size (m)
    """
    device, dtype = waveform.device, waveform.dtype
    epsilon = _get_epsilon(device, dtype)

    # size (m, window_size)
    strided_input = _get_strided(waveform, window_size, window_shift, snip_edges)

    if dither != 0.0:
        # Add scaled Gaussian noise to avoid exact-zero energies / log(0).
        rand_gauss = torch.randn(strided_input.shape, device=device, dtype=dtype)
        strided_input = strided_input + rand_gauss * dither

    if remove_dc_offset:
        # Subtract each row/frame by its mean
        row_means = torch.mean(strided_input, dim=1).unsqueeze(1)  # size (m, 1)
        strided_input = strided_input - row_means

    if raw_energy:
        # Compute the log energy of each row/frame before applying preemphasis and
        # window function
        signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor)  # size (m)

    if preemphasis_coefficient != 0.0:
        # strided_input[i,j] -= preemphasis_coefficient * strided_input[i, max(0, j-1)] for all i,j
        # 'replicate' padding duplicates the first sample so j=0 subtracts itself-scaled.
        offset_strided_input = torch.nn.functional.pad(strided_input.unsqueeze(0), (1, 0), mode="replicate").squeeze(
            0
        )  # size (m, window_size + 1)
        strided_input = strided_input - preemphasis_coefficient * offset_strided_input[:, :-1]

    # Apply window_function to each row/frame
    window_function = _feature_window_function(window_type, window_size, blackman_coeff, device, dtype).unsqueeze(
        0
    )  # size (1, window_size)
    strided_input = strided_input * window_function  # size (m, window_size)

    # Pad columns with zero until we reach size (m, padded_window_size)
    if padded_window_size != window_size:
        padding_right = padded_window_size - window_size
        strided_input = torch.nn.functional.pad(
            strided_input.unsqueeze(0), (0, padding_right), mode="constant", value=0
        ).squeeze(0)

    # Compute energy after window function (not the raw one)
    if not raw_energy:
        signal_log_energy = _get_log_energy(strided_input, epsilon, energy_floor)  # size (m)

    return strided_input, signal_log_energy
218
+
219
+
220
+ def _subtract_column_mean(tensor: Tensor, subtract_mean: bool) -> Tensor:
221
+ # subtracts the column mean of the tensor size (m, n) if subtract_mean=True
222
+ # it returns size (m, n)
223
+ if subtract_mean:
224
+ col_means = torch.mean(tensor, dim=0).unsqueeze(0)
225
+ tensor = tensor - col_means
226
+ return tensor
227
+
228
+
229
def spectrogram(
    waveform: Tensor,
    blackman_coeff: float = 0.42,
    channel: int = -1,
    dither: float = 0.0,
    energy_floor: float = 1.0,
    frame_length: float = 25.0,
    frame_shift: float = 10.0,
    min_duration: float = 0.0,
    preemphasis_coefficient: float = 0.97,
    raw_energy: bool = True,
    remove_dc_offset: bool = True,
    round_to_power_of_two: bool = True,
    sample_frequency: float = 16000.0,
    snip_edges: bool = True,
    subtract_mean: bool = False,
    window_type: str = POVEY,
) -> Tensor:
    r"""Create a spectrogram from a raw audio signal. This matches the input/output of Kaldi's
    compute-spectrogram-feats.

    Args:
        waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
        blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
        channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
        dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
            the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
        energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
            this floor is applied to the zeroth component, representing the total signal energy. The floor on the
            individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
        frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
        frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
        min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
        preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
        raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
        remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
        round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
            to FFT. (Default: ``True``)
        sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
            specified there) (Default: ``16000.0``)
        snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
            in the file, and the number of frames depends on the frame_length. If False, the number of frames
            depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
        subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
            it this way. (Default: ``False``)
        window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
            (Default: ``'povey'``)

    Returns:
        Tensor: A spectrogram identical to what Kaldi would output. The shape is
        (m, ``padded_window_size // 2 + 1``) where m is calculated in _get_strided
    """
    device, dtype = waveform.device, waveform.dtype
    epsilon = _get_epsilon(device, dtype)

    waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
        waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient
    )

    if len(waveform) < min_duration * sample_frequency:
        # signal is too short
        return torch.empty(0)

    strided_input, signal_log_energy = _get_window(
        waveform,
        padded_window_size,
        window_size,
        window_shift,
        window_type,
        blackman_coeff,
        snip_edges,
        raw_energy,
        energy_floor,
        dither,
        remove_dc_offset,
        preemphasis_coefficient,
    )

    # size (m, padded_window_size // 2 + 1, 2)
    fft = torch.fft.rfft(strided_input)

    # Convert the FFT into a power spectrum
    power_spectrum = torch.max(fft.abs().pow(2.0), epsilon).log()  # size (m, padded_window_size // 2 + 1)
    # Kaldi convention: the zeroth (DC) component is replaced by the frame's
    # log energy rather than the DC power.
    power_spectrum[:, 0] = signal_log_energy

    power_spectrum = _subtract_column_mean(power_spectrum, subtract_mean)
    return power_spectrum
316
+
317
+
318
def inverse_mel_scale_scalar(mel_freq: float) -> float:
    """Convert a single mel-scale value back to Hz (Kaldi's mel inversion)."""
    ratio = mel_freq / 1127.0
    return (math.exp(ratio) - 1.0) * 700.0
320
+
321
+
322
def inverse_mel_scale(mel_freq: Tensor) -> Tensor:
    """Elementwise mel-to-Hz conversion (tensor version of the scalar form)."""
    return ((mel_freq / 1127.0).exp() - 1.0) * 700.0
324
+
325
+
326
def mel_scale_scalar(freq: float) -> float:
    """Convert a single frequency in Hz to the mel scale (Kaldi's formula)."""
    hz_ratio = freq / 700.0
    return math.log(1.0 + hz_ratio) * 1127.0
328
+
329
+
330
def mel_scale(freq: Tensor) -> Tensor:
    """Elementwise Hz-to-mel conversion (tensor version of the scalar form)."""
    return (1.0 + freq / 700.0).log() * 1127.0
332
+
333
+
334
def vtln_warp_freq(
    vtln_low_cutoff: float,
    vtln_high_cutoff: float,
    low_freq: float,
    high_freq: float,
    vtln_warp_factor: float,
    freq: Tensor,
) -> Tensor:
    r"""This computes a VTLN warping function that is not the same as HTK's one,
    but has similar inputs (this function has the advantage of never producing
    empty bins).

    This function computes a warp function F(freq), defined between low_freq
    and high_freq inclusive, with the following properties:
        F(low_freq) == low_freq
        F(high_freq) == high_freq
    The function is continuous and piecewise linear with two inflection
        points.
    The lower inflection point (measured in terms of the unwarped
        frequency) is at frequency l, determined as described below.
    The higher inflection point is at a frequency h, determined as
        described below.
    If l <= f <= h, then F(f) = f/vtln_warp_factor.
    If the higher inflection point (measured in terms of the unwarped
        frequency) is at h, then max(h, F(h)) == vtln_high_cutoff.
        Since (by the last point) F(h) == h/vtln_warp_factor, then
        max(h, h/vtln_warp_factor) == vtln_high_cutoff, so
        h = vtln_high_cutoff / max(1, 1/vtln_warp_factor).
          = vtln_high_cutoff * min(1, vtln_warp_factor).
    If the lower inflection point (measured in terms of the unwarped
        frequency) is at l, then min(l, F(l)) == vtln_low_cutoff
        This implies that l = vtln_low_cutoff / min(1, 1/vtln_warp_factor)
                            = vtln_low_cutoff * max(1, vtln_warp_factor)
    Args:
        vtln_low_cutoff (float): Lower frequency cutoffs for VTLN
        vtln_high_cutoff (float): Upper frequency cutoffs for VTLN
        low_freq (float): Lower frequency cutoffs in mel computation
        high_freq (float): Upper frequency cutoffs in mel computation
        vtln_warp_factor (float): Vtln warp factor
        freq (Tensor): given frequency in Hz

    Returns:
        Tensor: Freq after vtln warp
    """
    assert vtln_low_cutoff > low_freq, "be sure to set the vtln_low option higher than low_freq"
    assert vtln_high_cutoff < high_freq, "be sure to set the vtln_high option lower than high_freq [or negative]"
    # Inflection points in the unwarped frequency domain (see docstring).
    l = vtln_low_cutoff * max(1.0, vtln_warp_factor)
    h = vtln_high_cutoff * min(1.0, vtln_warp_factor)
    scale = 1.0 / vtln_warp_factor
    Fl = scale * l  # F(l)
    Fh = scale * h  # F(h)
    assert l > low_freq and h < high_freq
    # slope of left part of the 3-piece linear function
    scale_left = (Fl - low_freq) / (l - low_freq)
    # [slope of center part is just "scale"]

    # slope of right part of the 3-piece linear function
    scale_right = (high_freq - Fh) / (high_freq - h)

    res = torch.empty_like(freq)

    outside_low_high_freq = torch.lt(freq, low_freq) | torch.gt(freq, high_freq)  # freq < low_freq || freq > high_freq
    before_l = torch.lt(freq, l)  # freq < l
    before_h = torch.lt(freq, h)  # freq < h
    after_h = torch.ge(freq, h)  # freq >= h

    # order of operations matter here (since there is overlapping frequency regions):
    # later masked assignments overwrite earlier ones, so the narrower/outermost
    # regions must be written last.
    res[after_h] = high_freq + scale_right * (freq[after_h] - high_freq)
    res[before_h] = scale * freq[before_h]
    res[before_l] = low_freq + scale_left * (freq[before_l] - low_freq)
    res[outside_low_high_freq] = freq[outside_low_high_freq]

    return res
407
+
408
+
409
def vtln_warp_mel_freq(
    vtln_low_cutoff: float,
    vtln_high_cutoff: float,
    low_freq: float,
    high_freq: float,
    vtln_warp_factor: float,
    mel_freq: Tensor,
) -> Tensor:
    r"""Apply the VTLN warp in the mel domain: convert mel -> Hz, warp with
    :func:`vtln_warp_freq`, then convert back to mel.

    Args:
        vtln_low_cutoff (float): Lower frequency cutoffs for VTLN
        vtln_high_cutoff (float): Upper frequency cutoffs for VTLN
        low_freq (float): Lower frequency cutoffs in mel computation
        high_freq (float): Upper frequency cutoffs in mel computation
        vtln_warp_factor (float): Vtln warp factor
        mel_freq (Tensor): Given frequency in Mel

    Returns:
        Tensor: ``mel_freq`` after vtln warp
    """
    return mel_scale(
        vtln_warp_freq(
            vtln_low_cutoff, vtln_high_cutoff, low_freq, high_freq, vtln_warp_factor, inverse_mel_scale(mel_freq)
        )
    )
434
+
435
+
436
def get_mel_banks(
    num_bins: int,
    window_length_padded: int,
    sample_freq: float,
    low_freq: float,
    high_freq: float,
    vtln_low: float,
    vtln_high: float,
    vtln_warp_factor: float,
) -> Tuple[Tensor, Tensor]:
    """Create a Kaldi-style triangular mel filterbank matrix.

    Args:
        num_bins (int): Number of triangular mel bins (must be > 3).
        window_length_padded (int): Padded window (FFT) size; must be even.
        sample_freq (float): Waveform sample frequency in Hz.
        low_freq (float): Lowest frequency covered by the filterbank.
        high_freq (float): Highest frequency; if <= 0.0, treated as an offset from Nyquist.
        vtln_low (float): Low inflection point of the VTLN warp.
        vtln_high (float): High inflection point; if negative, offset from Nyquist.
        vtln_warp_factor (float): VTLN warp factor (1.0 disables warping).

    Returns:
        (Tensor, Tensor): The tuple consists of ``bins`` (which is
        melbank of size (``num_bins``, ``num_fft_bins``)) and ``center_freqs`` (which is
        center frequencies of bins of size (``num_bins``)).
    """
    # NOTE(review): Kaldi itself allows num_bins == 3; this stricter check is
    # preserved unchanged to keep existing behavior.
    assert num_bins > 3, "Must have at least 3 mel bins"
    assert window_length_padded % 2 == 0
    # Integer division: `window_length_padded / 2` produced a float count,
    # which only worked because torch.arange accepts floats.
    num_fft_bins = window_length_padded // 2
    nyquist = 0.5 * sample_freq

    if high_freq <= 0.0:
        high_freq += nyquist

    assert (
        (0.0 <= low_freq < nyquist) and (0.0 < high_freq <= nyquist) and (low_freq < high_freq)
    ), "Bad values in options: low-freq {} and high-freq {} vs. nyquist {}".format(low_freq, high_freq, nyquist)

    # fft-bin width [think of it as Nyquist-freq / half-window-length]
    fft_bin_width = sample_freq / window_length_padded
    mel_low_freq = mel_scale_scalar(low_freq)
    mel_high_freq = mel_scale_scalar(high_freq)

    # divide by num_bins+1 in next line because of end-effects where the bins
    # spread out to the sides.
    mel_freq_delta = (mel_high_freq - mel_low_freq) / (num_bins + 1)

    if vtln_high < 0.0:
        vtln_high += nyquist

    assert vtln_warp_factor == 1.0 or (
        (low_freq < vtln_low < high_freq) and (0.0 < vtln_high < high_freq) and (vtln_low < vtln_high)
    ), "Bad values in options: vtln-low {} and vtln-high {}, versus " "low-freq {} and high-freq {}".format(
        vtln_low, vtln_high, low_freq, high_freq
    )

    # `bin_idx` (renamed from `bin` to avoid shadowing the builtin).
    bin_idx = torch.arange(num_bins).unsqueeze(1)
    left_mel = mel_low_freq + bin_idx * mel_freq_delta  # size(num_bins, 1)
    center_mel = mel_low_freq + (bin_idx + 1.0) * mel_freq_delta  # size(num_bins, 1)
    right_mel = mel_low_freq + (bin_idx + 2.0) * mel_freq_delta  # size(num_bins, 1)

    if vtln_warp_factor != 1.0:
        left_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, left_mel)
        center_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, center_mel)
        right_mel = vtln_warp_mel_freq(vtln_low, vtln_high, low_freq, high_freq, vtln_warp_factor, right_mel)

    center_freqs = inverse_mel_scale(center_mel)  # size (num_bins)
    # size(1, num_fft_bins)
    mel = mel_scale(fft_bin_width * torch.arange(num_fft_bins)).unsqueeze(0)

    # size (num_bins, num_fft_bins)
    up_slope = (mel - left_mel) / (center_mel - left_mel)
    down_slope = (right_mel - mel) / (right_mel - center_mel)

    if vtln_warp_factor == 1.0:
        # left_mel < center_mel < right_mel so we can min the two slopes and clamp negative values
        bins = torch.max(torch.zeros(1), torch.min(up_slope, down_slope))
    else:
        # warping can move the order of left_mel, center_mel, right_mel anywhere
        bins = torch.zeros_like(up_slope)
        up_idx = torch.gt(mel, left_mel) & torch.le(mel, center_mel)  # left_mel < mel <= center_mel
        down_idx = torch.gt(mel, center_mel) & torch.lt(mel, right_mel)  # center_mel < mel < right_mel
        bins[up_idx] = up_slope[up_idx]
        bins[down_idx] = down_slope[down_idx]

    return bins, center_freqs
512
+
513
+
514
def fbank(
    waveform: Tensor,
    blackman_coeff: float = 0.42,
    channel: int = -1,
    dither: float = 0.0,
    energy_floor: float = 1.0,
    frame_length: float = 25.0,
    frame_shift: float = 10.0,
    high_freq: float = 0.0,
    htk_compat: bool = False,
    low_freq: float = 20.0,
    min_duration: float = 0.0,
    num_mel_bins: int = 23,
    preemphasis_coefficient: float = 0.97,
    raw_energy: bool = True,
    remove_dc_offset: bool = True,
    round_to_power_of_two: bool = True,
    sample_frequency: float = 16000.0,
    snip_edges: bool = True,
    subtract_mean: bool = False,
    use_energy: bool = False,
    use_log_fbank: bool = True,
    use_power: bool = True,
    vtln_high: float = -500.0,
    vtln_low: float = 100.0,
    vtln_warp: float = 1.0,
    window_type: str = POVEY,
) -> Tensor:
    r"""Create a fbank from a raw audio signal. This matches the input/output of Kaldi's
    compute-fbank-feats.

    Args:
        waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
        blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
        channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
        dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
            the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
        energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
            this floor is applied to the zeroth component, representing the total signal energy. The floor on the
            individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
        frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
        frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
        high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist)
            (Default: ``0.0``)
        htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible features
            (need to change other parameters). (Default: ``False``)
        low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``)
        min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
        num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``)
        preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
        raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
        remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
        round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
            to FFT. (Default: ``True``)
        sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
            specified there) (Default: ``16000.0``)
        snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
            in the file, and the number of frames depends on the frame_length. If False, the number of frames
            depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
        subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
            it this way. (Default: ``False``)
        use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``)
        use_log_fbank (bool, optional): If true, produce log-filterbank, else produce linear. (Default: ``True``)
        use_power (bool, optional): If true, use power, else use magnitude. (Default: ``True``)
        vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
            negative, offset from high-mel-freq (Default: ``-500.0``)
        vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
        vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
        window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
            (Default: ``'povey'``)

    Returns:
        Tensor: A fbank identical to what Kaldi would output. The shape is (m, ``num_mel_bins + use_energy``)
        where m is calculated in _get_strided
    """
    device, dtype = waveform.device, waveform.dtype

    waveform, window_shift, window_size, padded_window_size = _get_waveform_and_window_properties(
        waveform, channel, sample_frequency, frame_shift, frame_length, round_to_power_of_two, preemphasis_coefficient
    )

    if len(waveform) < min_duration * sample_frequency:
        # signal is too short
        return torch.empty(0, device=device, dtype=dtype)

    # strided_input, size (m, padded_window_size) and signal_log_energy, size (m)
    strided_input, signal_log_energy = _get_window(
        waveform,
        padded_window_size,
        window_size,
        window_shift,
        window_type,
        blackman_coeff,
        snip_edges,
        raw_energy,
        energy_floor,
        dither,
        remove_dc_offset,
        preemphasis_coefficient,
    )

    # size (m, padded_window_size // 2 + 1)
    spectrum = torch.fft.rfft(strided_input).abs()
    if use_power:
        spectrum = spectrum.pow(2.0)

    # size (num_mel_bins, padded_window_size // 2)
    mel_energies, _ = get_mel_banks(
        num_mel_bins, padded_window_size, sample_frequency, low_freq, high_freq, vtln_low, vtln_high, vtln_warp
    )
    mel_energies = mel_energies.to(device=device, dtype=dtype)

    # pad right column with zeros and add dimension, size (num_mel_bins, padded_window_size // 2 + 1)
    # (the filterbank has no weight on the Nyquist bin)
    mel_energies = torch.nn.functional.pad(mel_energies, (0, 1), mode="constant", value=0)

    # sum with mel filterbanks over the power spectrum, size (m, num_mel_bins)
    mel_energies = torch.mm(spectrum, mel_energies.T)
    if use_log_fbank:
        # avoid log of zero (which should be prevented anyway by dithering)
        mel_energies = torch.max(mel_energies, _get_epsilon(device, dtype)).log()

    # if use_energy then add it as the last column for htk_compat == true else first column
    if use_energy:
        signal_log_energy = signal_log_energy.unsqueeze(1)  # size (m, 1)
        # returns size (m, num_mel_bins + 1)
        if htk_compat:
            mel_energies = torch.cat((mel_energies, signal_log_energy), dim=1)
        else:
            mel_energies = torch.cat((signal_log_energy, mel_energies), dim=1)

    mel_energies = _subtract_column_mean(mel_energies, subtract_mean)
    return mel_energies
646
+
647
+
648
def _get_dct_matrix(num_ceps: int, num_mel_bins: int) -> Tensor:
    """Build the truncated DCT matrix mapping mel energies to cepstra.

    Returns a tensor of shape (num_mel_bins, num_ceps) meant for a right
    multiply, i.e. ``features @ dct_matrix``.
    """
    # Full orthonormal DCT basis, shape (num_mel_bins, num_mel_bins).
    full_dct = torchaudio.functional.create_dct(num_mel_bins, num_mel_bins, "ortho")
    # Kaldi expects the first cepstral coefficient to be a weighted sum with
    # factor sqrt(1 / num_mel_bins). Since torchaudio right-multiplies (while
    # Kaldi left-multiplies, e.g. dct_matrix * vector), that weight goes into
    # the first *column* of the matrix here.
    full_dct[:, 0] = math.sqrt(1 / float(num_mel_bins))
    # Keep only the first num_ceps basis vectors.
    return full_dct[:, :num_ceps]
659
+
660
+
661
+ def _get_lifter_coeffs(num_ceps: int, cepstral_lifter: float) -> Tensor:
662
+ # returns size (num_ceps)
663
+ # Compute liftering coefficients (scaling on cepstral coeffs)
664
+ # coeffs are numbered slightly differently from HTK: the zeroth index is C0, which is not affected.
665
+ i = torch.arange(num_ceps)
666
+ return 1.0 + 0.5 * cepstral_lifter * torch.sin(math.pi * i / cepstral_lifter)
667
+
668
+
669
def mfcc(
    waveform: Tensor,
    blackman_coeff: float = 0.42,
    cepstral_lifter: float = 22.0,
    channel: int = -1,
    dither: float = 0.0,
    energy_floor: float = 1.0,
    frame_length: float = 25.0,
    frame_shift: float = 10.0,
    high_freq: float = 0.0,
    htk_compat: bool = False,
    low_freq: float = 20.0,
    num_ceps: int = 13,
    min_duration: float = 0.0,
    num_mel_bins: int = 23,
    preemphasis_coefficient: float = 0.97,
    raw_energy: bool = True,
    remove_dc_offset: bool = True,
    round_to_power_of_two: bool = True,
    sample_frequency: float = 16000.0,
    snip_edges: bool = True,
    subtract_mean: bool = False,
    use_energy: bool = False,
    vtln_high: float = -500.0,
    vtln_low: float = 100.0,
    vtln_warp: float = 1.0,
    window_type: str = POVEY,
) -> Tensor:
    r"""Create a mfcc from a raw audio signal. This matches the input/output of Kaldi's
    compute-mfcc-feats.

    Args:
        waveform (Tensor): Tensor of audio of size (c, n) where c is in the range [0,2)
        blackman_coeff (float, optional): Constant coefficient for generalized Blackman window. (Default: ``0.42``)
        cepstral_lifter (float, optional): Constant that controls scaling of MFCCs (Default: ``22.0``)
        channel (int, optional): Channel to extract (-1 -> expect mono, 0 -> left, 1 -> right) (Default: ``-1``)
        dither (float, optional): Dithering constant (0.0 means no dither). If you turn this off, you should set
            the energy_floor option, e.g. to 1.0 or 0.1 (Default: ``0.0``)
        energy_floor (float, optional): Floor on energy (absolute, not relative) in Spectrogram computation. Caution:
            this floor is applied to the zeroth component, representing the total signal energy. The floor on the
            individual spectrogram elements is fixed at std::numeric_limits<float>::epsilon(). (Default: ``1.0``)
        frame_length (float, optional): Frame length in milliseconds (Default: ``25.0``)
        frame_shift (float, optional): Frame shift in milliseconds (Default: ``10.0``)
        high_freq (float, optional): High cutoff frequency for mel bins (if <= 0, offset from Nyquist)
            (Default: ``0.0``)
        htk_compat (bool, optional): If true, put energy last. Warning: not sufficient to get HTK compatible
            features (need to change other parameters). (Default: ``False``)
        low_freq (float, optional): Low cutoff frequency for mel bins (Default: ``20.0``)
        num_ceps (int, optional): Number of cepstra in MFCC computation (including C0) (Default: ``13``)
        min_duration (float, optional): Minimum duration of segments to process (in seconds). (Default: ``0.0``)
        num_mel_bins (int, optional): Number of triangular mel-frequency bins (Default: ``23``)
        preemphasis_coefficient (float, optional): Coefficient for use in signal preemphasis (Default: ``0.97``)
        raw_energy (bool, optional): If True, compute energy before preemphasis and windowing (Default: ``True``)
        remove_dc_offset (bool, optional): Subtract mean from waveform on each frame (Default: ``True``)
        round_to_power_of_two (bool, optional): If True, round window size to power of two by zero-padding input
            to FFT. (Default: ``True``)
        sample_frequency (float, optional): Waveform data sample frequency (must match the waveform file, if
            specified there) (Default: ``16000.0``)
        snip_edges (bool, optional): If True, end effects will be handled by outputting only frames that completely fit
            in the file, and the number of frames depends on the frame_length. If False, the number of frames
            depends only on the frame_shift, and we reflect the data at the ends. (Default: ``True``)
        subtract_mean (bool, optional): Subtract mean of each feature file [CMS]; not recommended to do
            it this way. (Default: ``False``)
        use_energy (bool, optional): Add an extra dimension with energy to the FBANK output. (Default: ``False``)
        vtln_high (float, optional): High inflection point in piecewise linear VTLN warping function (if
            negative, offset from high-mel-freq) (Default: ``-500.0``)
        vtln_low (float, optional): Low inflection point in piecewise linear VTLN warping function (Default: ``100.0``)
        vtln_warp (float, optional): Vtln warp factor (only applicable if vtln_map not specified) (Default: ``1.0``)
        window_type (str, optional): Type of window ('hamming'|'hanning'|'povey'|'rectangular'|'blackman')
            (Default: ``"povey"``)

    Returns:
        Tensor: A mfcc identical to what Kaldi would output. The shape is (m, ``num_ceps``)
        where m is calculated in _get_strided
    """
    assert num_ceps <= num_mel_bins, "num_ceps cannot be larger than num_mel_bins: %d vs %d" % (num_ceps, num_mel_bins)

    dev, dt = waveform.device, waveform.dtype

    # Start from log-mel filterbank features. The mel energies must be in the
    # power domain (use_power=True), logged (use_log_fbank=True), and must not
    # have had their mean removed (subtract_mean=False) — mean subtraction, if
    # requested, is applied to the final cepstra below.
    # Shape: (m, num_mel_bins + use_energy).
    mel_feats = fbank(
        waveform=waveform,
        blackman_coeff=blackman_coeff,
        channel=channel,
        dither=dither,
        energy_floor=energy_floor,
        frame_length=frame_length,
        frame_shift=frame_shift,
        high_freq=high_freq,
        htk_compat=htk_compat,
        low_freq=low_freq,
        min_duration=min_duration,
        num_mel_bins=num_mel_bins,
        preemphasis_coefficient=preemphasis_coefficient,
        raw_energy=raw_energy,
        remove_dc_offset=remove_dc_offset,
        round_to_power_of_two=round_to_power_of_two,
        sample_frequency=sample_frequency,
        snip_edges=snip_edges,
        subtract_mean=False,
        use_energy=use_energy,
        use_log_fbank=True,
        use_power=True,
        vtln_high=vtln_high,
        vtln_low=vtln_low,
        vtln_warp=vtln_warp,
        window_type=window_type,
    )

    if use_energy:
        # fbank appended the log energy as the last column when htk_compat,
        # otherwise as the first; pull it out and keep only the mel columns.
        if htk_compat:
            log_energy = mel_feats[:, num_mel_bins]  # size (m,)
            mel_feats = mel_feats[:, :num_mel_bins]
        else:
            log_energy = mel_feats[:, 0]  # size (m,)
            mel_feats = mel_feats[:, 1 : num_mel_bins + 1]

    # DCT basis of size (num_mel_bins, num_ceps); right-multiply to get cepstra.
    dct_mat = _get_dct_matrix(num_ceps, num_mel_bins).to(dtype=dt, device=dev)
    cepstra = mel_feats.matmul(dct_mat)  # size (m, num_ceps)

    if cepstral_lifter != 0.0:
        # Broadcast liftering coefficients of size (1, num_ceps) over frames.
        lifter = _get_lifter_coeffs(num_ceps, cepstral_lifter).unsqueeze(0)
        cepstra = cepstra * lifter.to(device=dev, dtype=dt)

    # When energy is requested, it replaces C0; htk_compat later moves it to
    # the last column.
    if use_energy:
        cepstra[:, 0] = log_energy

    if htk_compat:
        energy = cepstra[:, :1]  # size (m, 1)
        cepstra = cepstra[:, 1:]  # size (m, num_ceps - 1)
        if not use_energy:
            # Undo the sqrt(1/2) scale that the orthonormal DCT applied to C0
            # (part of one common definition of the cosine transform).
            energy = energy * math.sqrt(2)
        cepstra = torch.cat((cepstra, energy), dim=1)

    return _subtract_column_mean(cepstra, subtract_mean)