torchaudio 2.9.1__cp311-cp311-manylinux_2_28_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. torchaudio/__init__.py +204 -0
  2. torchaudio/_extension/__init__.py +61 -0
  3. torchaudio/_extension/utils.py +133 -0
  4. torchaudio/_internal/__init__.py +10 -0
  5. torchaudio/_internal/module_utils.py +171 -0
  6. torchaudio/_torchcodec.py +340 -0
  7. torchaudio/compliance/__init__.py +5 -0
  8. torchaudio/compliance/kaldi.py +813 -0
  9. torchaudio/datasets/__init__.py +47 -0
  10. torchaudio/datasets/cmuarctic.py +157 -0
  11. torchaudio/datasets/cmudict.py +186 -0
  12. torchaudio/datasets/commonvoice.py +86 -0
  13. torchaudio/datasets/dr_vctk.py +121 -0
  14. torchaudio/datasets/fluentcommands.py +108 -0
  15. torchaudio/datasets/gtzan.py +1118 -0
  16. torchaudio/datasets/iemocap.py +147 -0
  17. torchaudio/datasets/librilight_limited.py +111 -0
  18. torchaudio/datasets/librimix.py +133 -0
  19. torchaudio/datasets/librispeech.py +174 -0
  20. torchaudio/datasets/librispeech_biasing.py +189 -0
  21. torchaudio/datasets/libritts.py +168 -0
  22. torchaudio/datasets/ljspeech.py +107 -0
  23. torchaudio/datasets/musdb_hq.py +139 -0
  24. torchaudio/datasets/quesst14.py +136 -0
  25. torchaudio/datasets/snips.py +157 -0
  26. torchaudio/datasets/speechcommands.py +183 -0
  27. torchaudio/datasets/tedlium.py +218 -0
  28. torchaudio/datasets/utils.py +54 -0
  29. torchaudio/datasets/vctk.py +143 -0
  30. torchaudio/datasets/voxceleb1.py +309 -0
  31. torchaudio/datasets/yesno.py +89 -0
  32. torchaudio/functional/__init__.py +130 -0
  33. torchaudio/functional/_alignment.py +128 -0
  34. torchaudio/functional/filtering.py +1685 -0
  35. torchaudio/functional/functional.py +2505 -0
  36. torchaudio/lib/__init__.py +0 -0
  37. torchaudio/lib/_torchaudio.so +0 -0
  38. torchaudio/lib/libtorchaudio.so +0 -0
  39. torchaudio/models/__init__.py +85 -0
  40. torchaudio/models/_hdemucs.py +1008 -0
  41. torchaudio/models/conformer.py +293 -0
  42. torchaudio/models/conv_tasnet.py +330 -0
  43. torchaudio/models/decoder/__init__.py +64 -0
  44. torchaudio/models/decoder/_ctc_decoder.py +568 -0
  45. torchaudio/models/decoder/_cuda_ctc_decoder.py +187 -0
  46. torchaudio/models/deepspeech.py +84 -0
  47. torchaudio/models/emformer.py +884 -0
  48. torchaudio/models/rnnt.py +816 -0
  49. torchaudio/models/rnnt_decoder.py +339 -0
  50. torchaudio/models/squim/__init__.py +11 -0
  51. torchaudio/models/squim/objective.py +326 -0
  52. torchaudio/models/squim/subjective.py +150 -0
  53. torchaudio/models/tacotron2.py +1046 -0
  54. torchaudio/models/wav2letter.py +72 -0
  55. torchaudio/models/wav2vec2/__init__.py +45 -0
  56. torchaudio/models/wav2vec2/components.py +1167 -0
  57. torchaudio/models/wav2vec2/model.py +1579 -0
  58. torchaudio/models/wav2vec2/utils/__init__.py +7 -0
  59. torchaudio/models/wav2vec2/utils/import_fairseq.py +213 -0
  60. torchaudio/models/wav2vec2/utils/import_huggingface.py +134 -0
  61. torchaudio/models/wav2vec2/wavlm_attention.py +214 -0
  62. torchaudio/models/wavernn.py +409 -0
  63. torchaudio/pipelines/__init__.py +102 -0
  64. torchaudio/pipelines/_source_separation_pipeline.py +109 -0
  65. torchaudio/pipelines/_squim_pipeline.py +156 -0
  66. torchaudio/pipelines/_tts/__init__.py +16 -0
  67. torchaudio/pipelines/_tts/impl.py +385 -0
  68. torchaudio/pipelines/_tts/interface.py +255 -0
  69. torchaudio/pipelines/_tts/utils.py +230 -0
  70. torchaudio/pipelines/_wav2vec2/__init__.py +0 -0
  71. torchaudio/pipelines/_wav2vec2/aligner.py +87 -0
  72. torchaudio/pipelines/_wav2vec2/impl.py +1699 -0
  73. torchaudio/pipelines/_wav2vec2/utils.py +346 -0
  74. torchaudio/pipelines/rnnt_pipeline.py +380 -0
  75. torchaudio/transforms/__init__.py +78 -0
  76. torchaudio/transforms/_multi_channel.py +467 -0
  77. torchaudio/transforms/_transforms.py +2138 -0
  78. torchaudio/utils/__init__.py +4 -0
  79. torchaudio/utils/download.py +89 -0
  80. torchaudio/version.py +2 -0
  81. torchaudio-2.9.1.dist-info/METADATA +133 -0
  82. torchaudio-2.9.1.dist-info/RECORD +85 -0
  83. torchaudio-2.9.1.dist-info/WHEEL +5 -0
  84. torchaudio-2.9.1.dist-info/licenses/LICENSE +25 -0
  85. torchaudio-2.9.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1167 @@
1
+ import logging
2
+ from typing import List, Optional, Tuple
3
+
4
+ import torch
5
+ from torch import nn, Tensor
6
+ from torch.nn import Module, Parameter
7
+
8
+ from .wavlm_attention import WavLMSelfAttention
9
+
10
+ _LG = logging.getLogger(__name__)
11
+
12
+
13
+ def _init_transformer_params(module):
14
+ """
15
+ Initialize the weights of Transformer module in Wav2Vec2/HuBERT.
16
+
17
+ If the module is ``nn.Linear``, normalize the weight with mean 0 and standard deviation 0.02.
18
+ If ``bias`` is set to ``True`` in the module, set ``bias`` to 0.
19
+
20
+ If the module is ``nn.Embedding``, normalize the weight with mean 0 and standard deviation 0.02.
21
+ If ``padding_idx`` is not None, set the weight of padding to 0.
22
+
23
+ Note:
24
+ Ths method corresponds to
25
+ `init_bert_params
26
+ <https://github.com/facebookresearch/fairseq/blob/main/fairseq/modules/transformer_sentence_encoder.py#L21>`__
27
+ in the original ``fairseq`` implementation.
28
+ """
29
+
30
+ def normal_(data):
31
+ data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
32
+
33
+ if isinstance(module, nn.Linear):
34
+ normal_(module.weight.data)
35
+ if module.bias is not None:
36
+ module.bias.data.zero_()
37
+ if isinstance(module, nn.Embedding):
38
+ normal_(module.weight.data)
39
+ if module.padding_idx is not None:
40
+ module.weight.data[module.padding_idx].zero_()
41
+
42
+
43
class LayerNorm(nn.LayerNorm):
    """Layer norm for channels-first input.

    The input is transposed so normalization runs over the last axis, then
    transposed back, so ``[batch, feature, frame]`` tensors can be handled.
    """

    def forward(self, input: Tensor) -> Tensor:
        transposed = input.transpose(-2, -1)
        normalized = nn.functional.layer_norm(
            transposed, self.normalized_shape, self.weight, self.bias, self.eps
        )
        return normalized.transpose(-2, -1)
51
+
52
+
53
class ConvLayerBlock(Module):
    """Single convolution unit of :class:`FeatureExtractor`.

    Applies ``Conv1d`` -> optional normalization -> GELU, and, when valid
    lengths are supplied, recomputes them for the strided convolution.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int,
        bias: bool,
        layer_norm: Optional[Module],
    ):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.layer_norm = layer_norm
        self.conv = nn.Conv1d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            bias=bias,
        )

    def forward(
        self,
        x: Tensor,
        length: Optional[Tensor],
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """
        Args:
            x (Tensor): Shape: ``[batch, in_channels, in_frame]``.
            length (Tensor or None, optional): Shape ``[batch, ]``.
        Returns:
            Tensor: Shape ``[batch, out_channels, out_frames]``.
            Optional[Tensor]: Shape ``[batch, ]``.
        """
        out = self.conv(x)
        if self.layer_norm is not None:
            out = self.layer_norm(out)
        out = nn.functional.gelu(out)

        if length is not None:
            # Standard conv output-length formula (no padding/dilation).
            length = torch.div(length - self.kernel_size, self.stride, rounding_mode="floor") + 1
            # A zero-length input would yield a negative length; clamp at zero.
            length = torch.max(torch.zeros_like(length), length)
        return out, length
100
+
101
+
102
class FeatureExtractor(Module):
    """Extract frame-level features from raw audio.

    Args:
        conv_layers (nn.ModuleList):
            stack of convolution blocks, each taking ``(x, length)`` and
            returning the transformed ``(x, length)``.
    """

    def __init__(
        self,
        conv_layers: nn.ModuleList,
    ):
        super().__init__()
        self.conv_layers = conv_layers

    def forward(
        self,
        x: Tensor,
        length: Optional[Tensor],
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """
        Args:
            x (Tensor):
                Batch of audio waveforms, shape: ``[batch, time]``.
            length (Tensor or None, optional):
                Valid length of each input sample, shape: ``[batch, ]``.

        Returns:
            Tensor:
                The resulting feature, shape: ``[batch, frame, feature]``
            Optional[Tensor]:
                Valid length of each output sample, shape: ``[batch, ]``.
        """
        if x.ndim != 2:
            raise ValueError(f"Expected the input Tensor to be 2D (batch, time). Found: {list(x.shape)}")

        out = x.unsqueeze(1)  # (batch, channel==1, frame)
        for block in self.conv_layers:
            out, length = block(out, length)  # (batch, feature, frame)
        # Reorder to (batch, frame, feature) for the downstream encoder.
        return out.transpose(1, 2), length
144
+
145
+
146
class FeatureProjection(Module):
    """Bridge between :class:`FeatureExtractor` and the encoder.

    Normalizes the extracted features, projects them to the encoder
    dimension, and applies dropout.

    Args:
        in_features (int): Input feature dim.
        out_features (int): Output feature dim.
        dropout (float): Dropout probability.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        dropout: float,
    ):
        super().__init__()
        self.layer_norm = nn.LayerNorm(in_features)
        self.projection = nn.Linear(in_features, out_features)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """
        Args:
            x (Tensor):
                Feature Tensor, shape: ``[batch, frame, in_feature]``
        Returns:
            Tensor: Projected features, shape: ``[batch, frame, out_feature]``.
        """
        return self.dropout(self.projection(self.layer_norm(x)))
183
+
184
+
185
+ class ConvolutionalPositionalEmbedding(Module):
186
+ """Positional embedding which is placed at the beginning of Transformer.
187
+
188
+ Args:
189
+ embed_dim (int): Feature dimension of the input Tensor.
190
+ kernel_size (int): The number of frames to be use.
191
+ groups (int): The number of groups in feature dimensions.
192
+ """
193
+
194
+ def __init__(
195
+ self,
196
+ embed_dim: int,
197
+ kernel_size: int,
198
+ groups: int,
199
+ ):
200
+ super().__init__()
201
+ self.embed_dim = embed_dim
202
+ self.kernel_size = kernel_size
203
+ self.conv = nn.Conv1d(
204
+ in_channels=embed_dim,
205
+ out_channels=embed_dim,
206
+ kernel_size=kernel_size,
207
+ padding=kernel_size // 2,
208
+ groups=groups,
209
+ )
210
+
211
+ self.conv = nn.utils.parametrizations.weight_norm(self.conv, name="weight", dim=2)
212
+ self.num_remove: int = 1 if kernel_size % 2 == 0 else 0
213
+
214
+ def __prepare_scriptable__(self):
215
+ if self.conv.__class__.__name__ == "ParametrizedConv1d":
216
+ _LG.warning("Removing weight_norm from %s", self.__class__.__name__)
217
+ torch.nn.utils.parametrize.remove_parametrizations(self.conv, "weight")
218
+ return self
219
+
220
+ def forward(self, x):
221
+ """
222
+ Args:
223
+ x (Tensor): shape ``[batch, frame, feature]``.
224
+
225
+ Returns:
226
+ Tensor: The resulting feature. Shape ``[batch, frame, feature]``.
227
+ """
228
+ x = x.transpose(-2, -1)
229
+ x = self.conv(x)
230
+ if self.num_remove > 0:
231
+ x = x[..., : -self.num_remove]
232
+ x = torch.nn.functional.gelu(x)
233
+ x = x.transpose(-2, -1)
234
+ return x
235
+
236
+
237
class SelfAttention(Module):
    """Multihead Self Attention module

    Args:
        embed_dim (int): Total dimension of the model.
        num_heads (int): The number of heads.
        dropout (float, optional):
            Dropout probability on attn_output_weights. Default: ``0.0``
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
    ):
        super().__init__()
        if embed_dim % num_heads != 0:
            raise ValueError(f"`embed_dim ({embed_dim})` is not divisible by `num_heads ({num_heads})`")

        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.scaling = self.head_dim**-0.5

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=True)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=True)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=True)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True)

    def forward(
        self,
        x: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_bias: Optional[Tensor] = None,
        key_padding_mask: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """
        Args:
            x (Tensor): shape: ``[batch_size, sequence_length, embed_dim]``.
            attention_mask (Tensor or ``None``, optional):
                shape: ``[batch_size, 1, sequence_length, sequence_length]``
            position_bias: Not used. Only for the compatibility with :py:class:`WavLMSelfAttention`.
            key_padding_mask (Tensor or ``None``): Not used. Only for the compatibility with
                :py:class:`WavLMSelfAttention`.
        Returns:
            (Tensor, ``None``): The resulting attention output and ``None`` (necessary for compatibility
                with :py:class:`WavLMSelAttention`).
                Attention output shape: ``[batch, sequence_length, embed_dim]``.
        """
        if x.ndim != 3 or x.shape[2] != self.embed_dim:
            raise ValueError(
                f"The expected input shape is (batch, sequence, embed_dim=={self.embed_dim}). " f"Found {x.shape}."
            )
        batch_size, length, embed_dim = x.size()
        if attention_mask is not None:
            shape_ = (batch_size, 1, length, length)
            if attention_mask.size() != shape_:
                raise ValueError(f"The expected attention mask shape is {shape_}. " f"Found {attention_mask.size()}.")

        def _split_heads(t: Tensor) -> Tensor:
            # (batch, length, embed) -> (batch, num_heads, length, head_dim)
            return t.view(batch_size, length, self.num_heads, self.head_dim).transpose(2, 1)

        query = _split_heads(self.q_proj(x))
        key = _split_heads(self.k_proj(x))
        value = _split_heads(self.v_proj(x))
        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query,
            key,
            value,
            attn_mask=attention_mask,
            dropout_p=self.dropout if self.training else 0.0,
            is_causal=False,
        )
        attn_output = attn_output.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_dim)
        # The second return value keeps the interface compatible with WavLMSelfAttention.
        return self.out_proj(attn_output), None
311
+
312
+
313
class FeedForward(Module):
    """Position-wise feed forward block that follows the attention layer in an encoder layer."""

    def __init__(
        self,
        io_features: int,
        intermediate_features: int,
        intermediate_dropout: float,
        output_dropout: float,
    ):
        super().__init__()
        self.intermediate_dense = nn.Linear(io_features, intermediate_features)
        self.intermediate_dropout = nn.Dropout(intermediate_dropout)
        self.output_dense = nn.Linear(intermediate_features, io_features)
        self.output_dropout = nn.Dropout(output_dropout)

    def forward(self, x):
        """
        Args:
            x (Tensor): shape: `(batch, sequence_length, io_features)`
        Returns:
            x (Tensor): shape: `(batch, sequence_length, io_features)`
        """
        hidden = self.intermediate_dropout(torch.nn.functional.gelu(self.intermediate_dense(x)))
        return self.output_dropout(self.output_dense(hidden))
343
+
344
+
345
class EncoderLayer(Module):
    """A layer unit in encoder. Combines multihead self attention and feed forward."""

    def __init__(
        self,
        attention: Module,
        dropout: float,
        layer_norm_first: bool,
        feed_forward: Module,
    ):
        super().__init__()
        self.attention = attention
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(attention.embed_dim)
        self.layer_norm_first = layer_norm_first
        self.feed_forward = feed_forward
        self.final_layer_norm = nn.LayerNorm(attention.embed_dim)

    def forward(
        self,
        x: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_bias: Optional[Tensor] = None,
        key_padding_mask: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """
        Args:
            x (Tensor): Input of shape ``(batch, sequence_length, embed_dim)``.
            attention_mask (Tensor or ``None``, optional): attention mask
                of shape ``(batch, 1, sequence_length, sequence_length)``. (Default: ``None``)
            position_bias (Tensor or ``None``, optional): position bias of shape
                ``(batch_size * num_heads, src_len, src_len)``.
                Only necessary for WavLM model, ``None`` otherwise. (Default: ``None``)
            key_padding_mask (Tensor or ``None``, optional): key padding mask of shape ``(batch_size, src_len)``.
                Only used for WavLM model, ignored otherwise. (Default: ``None``)
        Returns:
            (x, position_bias): Shapes are the same as in the input. Position bias is only relevant for WavLM
                model, ``None`` otherwise.
        """
        residual = x
        if self.layer_norm_first:
            # Pre-norm: normalize before attention.
            x = self.layer_norm(x)

        x, position_bias = self.attention(
            x, attention_mask=attention_mask, position_bias=position_bias, key_padding_mask=key_padding_mask
        )
        x = residual + self.dropout(x)

        if self.layer_norm_first:
            # Second residual connection around the feed-forward sub-block.
            x = x + self.feed_forward(self.final_layer_norm(x))
        else:
            # Post-norm: normalize after attention and after feed-forward.
            x = self.layer_norm(x)
            x = self.final_layer_norm(x + self.feed_forward(x))
        return x, position_bias
402
+
403
+
404
class Transformer(Module):
    """Transformer encoder: positional conv embedding followed by a stack of encoder layers.

    During training, each layer may be skipped with probability ``layer_drop``
    (LayerDrop); at inference every layer runs.
    """

    def __init__(
        self,
        pos_conv_embed: Module,
        dropout: float,
        layers: Module,
        layer_norm_first: bool,
        layer_drop: float,
    ):
        super().__init__()
        self.pos_conv_embed = pos_conv_embed
        self.layer_norm = nn.LayerNorm(pos_conv_embed.embed_dim)
        self.layer_norm_first = layer_norm_first
        self.layer_drop = layer_drop
        self.dropout = nn.Dropout(dropout)
        self.layers = layers

    def _preprocess(self, x: Tensor):
        # Add positional information; optionally pre-normalize, then dropout.
        x = x + self.pos_conv_embed(x)
        if self.layer_norm_first:
            x = self.layer_norm(x)
        return self.dropout(x)

    def forward(
        self,
        x: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_bias: Optional[Tensor] = None,
    ) -> Tensor:
        x = self._preprocess(x)
        for layer in self.layers:
            # LayerDrop: randomly skip a layer, training mode only.
            skip = self.training and torch.rand(1).item() <= self.layer_drop
            if not skip:
                x, position_bias = layer(x, attention_mask, position_bias=position_bias)

        if not self.layer_norm_first:
            # Post-norm variant applies the final LayerNorm here.
            x = self.layer_norm(x)
        return x

    def get_intermediate_outputs(
        self,
        x: Tensor,
        attention_mask: Optional[Tensor] = None,
        num_layers: Optional[int] = None,
    ) -> List[Tensor]:
        if num_layers is not None:
            if not 0 < num_layers <= len(self.layers):
                raise ValueError(f"`num_layers` must be between [1, {len(self.layers)}]")

        ret: List[Tensor] = []
        position_bias = None
        x = self._preprocess(x)
        for layer in self.layers:
            x, position_bias = layer(x, attention_mask, position_bias=position_bias)
            ret.append(x)
            if num_layers is not None and len(ret) >= num_layers:
                break
        return ret
464
+
465
+
466
class Encoder(Module):
    """Wav2Vec2 encoder: feature projection followed by a Transformer."""

    def __init__(
        self,
        feature_projection: Module,
        transformer: Module,
    ):
        super().__init__()
        self.feature_projection = feature_projection
        self.transformer = transformer

    def _preprocess(
        self,
        features: Tensor,
        lengths: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        x = self.feature_projection(features)

        mask: Optional[Tensor] = None
        if lengths is not None:
            batch_size, max_len, _ = x.shape
            # Frames at or beyond each sample's valid length are padding;
            # zero them out in-place.
            padding_mask = torch.arange(max_len, device=lengths.device).expand(batch_size, max_len) >= lengths[:, None]
            x[padding_mask] = 0.0
            # Convert the boolean mask into additive attention weights
            # (-10000 on padded positions) and broadcast to attention shape.
            mask = -10000.0 * padding_mask[:, None, None, :].to(dtype=features.dtype)
            mask = mask.expand(batch_size, 1, max_len, max_len)
        return x, mask

    def forward(
        self,
        features: Tensor,
        lengths: Optional[Tensor] = None,
    ) -> Tensor:
        x, mask = self._preprocess(features, lengths)
        return self.transformer(x, attention_mask=mask)

    def extract_features(
        self,
        features: Tensor,
        lengths: Optional[Tensor] = None,
        num_layers: Optional[int] = None,
    ) -> List[Tensor]:
        x, masks = self._preprocess(features, lengths)
        return self.transformer.get_intermediate_outputs(x, attention_mask=masks, num_layers=num_layers)
511
+
512
+
513
+ ################################################################################
514
def _get_feature_extractor(
    norm_mode: str,
    shapes: List[Tuple[int, int, int]],
    bias: bool,
) -> FeatureExtractor:
    """Build the convolutional feature extractor.

    Args:
        norm_mode (str):
            Either "group_norm" or "layer_norm".
            With "group_norm", a single GroupNorm is applied in the first
            convolution block only; with "layer_norm", every block gets
            layer normalization.
            Corresponds to "extractor_mode" in fairseq:
            "group_norm" for Base arch, "layer_norm" for Large arch.
        shapes (list of tuple of int):
            Configuration of convolution layers,
            ``[(output_channel, kernel_size, stride), ...]``.
            Corresponds to "conv_feature_layers" in fairseq; the expected
            value for all architectures is
            ``[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2``.
        bias (bool):
            Whether each convolution has a bias term.
            Corresponds to "conv_bias" in fairseq:
            False for Base arch, True for Large arch.

    See Also:
        Original implementation:
        https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L666-L733
    """
    if norm_mode not in ["group_norm", "layer_norm"]:
        raise ValueError("Invalid norm mode")

    blocks = []
    in_channels = 1
    for i, (out_channels, kernel_size, stride) in enumerate(shapes):
        if norm_mode == "group_norm" and i == 0:
            # Single normalization, applied only in the first block.
            normalization: Optional[Module] = nn.GroupNorm(
                num_groups=out_channels,
                num_channels=out_channels,
                affine=True,
            )
        elif norm_mode == "layer_norm":
            normalization = LayerNorm(
                normalized_shape=out_channels,
                elementwise_affine=True,
            )
        else:
            normalization = None
        blocks.append(
            ConvLayerBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                bias=bias,
                layer_norm=normalization,
            )
        )
        in_channels = out_channels
    return FeatureExtractor(nn.ModuleList(blocks))
587
+
588
+
589
def _get_encoder(
    in_features: int,
    embed_dim: int,
    dropout_input: float,
    pos_conv_kernel: int,
    pos_conv_groups: int,
    num_layers: int,
    num_heads: int,
    attention_dropout: float,
    ff_interm_features: int,
    ff_interm_dropout: float,
    dropout: float,
    layer_norm_first: bool,
    layer_drop: float,
) -> Encoder:
    """Construct a Wav2Vec2 :class:`Encoder` (feature projection + Transformer).

    Each argument maps onto a fairseq configuration entry (quoted below);
    "Base"/"Large" refer to the wav2vec 2.0 Base and Large architectures.

    Args:
        in_features (int): The number of input features.
        embed_dim (int):
            Embedding dimension ("encoder_embed_dim").
            768 for Base, 1024 for Large.
        dropout_input (float):
            Dropout applied after the input feature is projected to
            ``embed_dim`` ("dropout_input"). 0.1 for both Base and Large.
        pos_conv_kernel (int):
            Kernel size of the convolutional positional embedding
            ("conv_pos"). 128 for both Base and Large.
        pos_conv_groups (int):
            Number of groups of the convolutional positional embedding
            ("conv_pos_groups"). 16 for both Base and Large.
        num_layers (int):
            Number of self-attention layers ("encoder_layers").
            12 for Base, 24 for Large.
        num_heads (int):
            Number of attention heads ("encoder_attention_heads").
            12 for Base, 16 for Large.
        attention_dropout (float):
            Dropout applied after softmax in self-attention
            ("attention_dropout"). 0.1 for Base, 0.0 for Large.
        ff_interm_features (int):
            Hidden dimension of the feed-forward layer
            ("encoder_ffn_embed_dim"). 3072 for Base, 4096 for Large.
        ff_interm_dropout (float):
            Dropout inside the feed-forward layer ("activation_dropout").
            0.1 for both Base and Large.
        dropout (float):
            Dropout at the end of the feed-forward layer ("dropout").
            0.1 for Base, 0.0 for Large.
        layer_norm_first (bool):
            Controls the order of layer norm in the transformer and each
            encoder layer ("layer_norm_first"). If True, layer norm is applied
            before features are fed to the encoder layers, and each encoder
            layer normalizes before/around its sub-blocks (pre-norm);
            if False, normalization happens after (post-norm).
            False for Base, True for Large.
        layer_drop (float):
            Probability of dropping an encoder layer during training
            ("layerdrop"). 0.1 for both Base and Large.

    See Also:
        Original fairseq implementation and config definitions:
        https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py
    """
    feature_projection = FeatureProjection(in_features, embed_dim, dropout_input)
    pos_conv = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups)

    # Original impl
    # https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782
    encoder_layers = nn.ModuleList(
        EncoderLayer(
            attention=SelfAttention(
                embed_dim=embed_dim,
                num_heads=num_heads,
                dropout=attention_dropout,
            ),
            dropout=dropout,
            layer_norm_first=layer_norm_first,
            feed_forward=FeedForward(
                io_features=embed_dim,
                intermediate_features=ff_interm_features,
                intermediate_dropout=ff_interm_dropout,
                output_dropout=dropout,
            ),
        )
        for _ in range(num_layers)
    )
    transformer = Transformer(
        pos_conv_embed=pos_conv,
        dropout=dropout,
        layers=encoder_layers,
        # Note the inversion: Transformer applies its own LayerNorm at the end
        # only when its ``layer_norm_first`` is False.
        layer_norm_first=not layer_norm_first,
        layer_drop=layer_drop,
    )
    return Encoder(feature_projection, transformer)
760
+
761
+
762
def _get_wavlm_encoder(
    in_features: int,
    embed_dim: int,
    dropout_input: float,
    pos_conv_kernel: int,
    pos_conv_groups: int,
    num_layers: int,
    num_heads: int,
    num_buckets: int,
    max_distance: int,
    attention_dropout: float,
    ff_interm_features: int,
    ff_interm_dropout: float,
    dropout: float,
    layer_norm_first: bool,
    layer_drop: float,
) -> Encoder:
    """Construct the encoder for a WavLM model :cite:`chen2022wavlm`.

    Structurally the same as the encoder produced by :py:func:`_get_encoder`;
    the only differences are that each transformer layer uses
    ``WavLMSelfAttention`` instead of ``SelfAttention``, plus the two extra
    parameters ``num_buckets`` and ``max_distance`` it requires.

    Args:
        in_features (int): See :py:func:`_get_encoder`.
        embed_dim (int): See :py:func:`_get_encoder`.
        dropout_input (float): See :py:func:`_get_encoder`.
        pos_conv_kernel (int): See :py:func:`_get_encoder`.
        pos_conv_groups (int): See :py:func:`_get_encoder`.
        num_layers (int): See :py:func:`_get_encoder`.
        num_heads (int): See :py:func:`_get_encoder`.
        num_buckets (int): Number of buckets for relative position embedding.
        max_distance (int): Maximum distance for relative position embedding.
        attention_dropout (float): See :py:func:`_get_encoder`.
        ff_interm_features (int): See :py:func:`_get_encoder`.
        ff_interm_dropout (float): See :py:func:`_get_encoder`.
        dropout (float): See :py:func:`_get_encoder`.
        layer_norm_first (bool): See :py:func:`_get_encoder`.
        layer_drop (float): See :py:func:`_get_encoder`.

    Returns:
        Encoder: The assembled WavLM encoder.
    """
    projection = FeatureProjection(in_features, embed_dim, dropout_input)
    positional_embedding = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups)

    # Original impl
    # https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782
    def _make_layer(layer_index: int) -> EncoderLayer:
        # Build a single transformer layer; the relative position bias is only
        # attached to the very first layer.
        return EncoderLayer(
            attention=WavLMSelfAttention(
                embed_dim=embed_dim,
                num_heads=num_heads,
                num_buckets=num_buckets,
                max_distance=max_distance,
                dropout=attention_dropout,
                has_relative_attention_bias=(layer_index == 0),
            ),
            dropout=dropout,
            layer_norm_first=layer_norm_first,
            feed_forward=FeedForward(
                io_features=embed_dim,
                intermediate_features=ff_interm_features,
                intermediate_dropout=ff_interm_dropout,
                output_dropout=dropout,
            ),
        )

    encoder_layers = nn.ModuleList([_make_layer(idx) for idx in range(num_layers)])
    transformer = Transformer(
        pos_conv_embed=positional_embedding,
        dropout=dropout,
        layers=encoder_layers,
        layer_norm_first=not layer_norm_first,
        layer_drop=layer_drop,
    )
    return Encoder(projection, transformer)
839
+
840
+
841
+ def _compute_mask_indices(
842
+ shape: Tuple[int, int],
843
+ padding_mask: Optional[Tensor],
844
+ mask_prob: float,
845
+ mask_length: int,
846
+ mask_type: str = "static",
847
+ mask_other: float = 0.0,
848
+ min_masks: int = 0,
849
+ no_overlap: bool = False,
850
+ min_space: int = 0,
851
+ ) -> Tensor:
852
+ """Computes random mask spans for a given shape.
853
+ Args:
854
+ shape (int, int): The shape for which to compute masks.
855
+ The first element is batch size and second is the number of frames.
856
+ padding_mask (Tensor or None): The padding mask of the same dimension as shape,
857
+ which will prevent masking padded elements.
858
+ mask_prob (float): Probability for each token to be chosen as start of the span to be masked.
859
+ This will be multiplied by number of timesteps divided by length of mask span to mask
860
+ approximately this percentage of all elements. However due to overlaps, the actual number
861
+ will be smaller (unless no_overlap is True).
862
+ mask_type (str): How to compute mask lengths. Options: [``static``, ``uniform``, ``normal``, ``poisson``].
863
+ ``static``: Fixed size
864
+ ``uniform``: Sample from uniform distribution [mask_other, mask_length*2]
865
+ ``normal``: Sample from normal distribution with mean ``mask_length`` and stdev ``mask_other``.
866
+ ``poisson``: Sample from possion distribution with lambda = ``mask_length``.
867
+ min_masks (int): Minimum number of masked spans.
868
+ no_overlap (bool): If false, will switch to an alternative recursive algorithm
869
+ that prevents spans from overlapping.
870
+ min_space (int): How many frames to keep unmasked between spans (Only used if no_overlap is True).
871
+
872
+ Returns:
873
+ (Tensor): The mask indices of dimension `[batch, frame]`.
874
+ """
875
+
876
+ batch_size, frame = shape
877
+ mask = torch.full((batch_size, frame), False)
878
+ # add a random number for probabilistic rounding
879
+ all_num_mask = int(mask_prob * frame / float(mask_length) + torch.rand(1))
880
+
881
+ all_num_mask = max(min_masks, all_num_mask)
882
+
883
+ mask_idcs = []
884
+ for i in range(batch_size):
885
+ if padding_mask is not None:
886
+ sz = frame - padding_mask[i].long().sum().item()
887
+ # add a random number for probabilistic rounding
888
+ num_mask = int(mask_prob * sz / float(mask_length) + torch.rand(1))
889
+ num_mask = max(min_masks, num_mask)
890
+ else:
891
+ sz = frame
892
+ num_mask = all_num_mask
893
+
894
+ if mask_type == "static":
895
+ lengths = torch.full((num_mask,), mask_length)
896
+ elif mask_type == "uniform":
897
+ lengths = torch.randint(int(mask_other), mask_length * 2 + 1, size=(num_mask,))
898
+ elif mask_type == "normal":
899
+ lengths = torch.normal(mask_length, mask_other, size=(num_mask,))
900
+ lengths = torch.maximum(torch.ones(1), torch.round(lengths)).int()
901
+ elif mask_type == "poisson":
902
+ lengths = torch.poisson(mask_length, size=(num_mask,))
903
+ lengths = torch.round(lengths).int()
904
+ else:
905
+ raise Exception(f"unknown mask selection: {mask_type}")
906
+
907
+ if sum(lengths) == 0:
908
+ lengths[0] = min(mask_length, sz - 1)
909
+
910
+ if no_overlap:
911
+ mask_idc = []
912
+
913
+ def arrange(s, e, length, keep_length):
914
+ span_start = torch.randint(s, e - length, size=(1,))
915
+ mask_idc.extend(span_start + i for i in range(length))
916
+
917
+ new_parts = []
918
+ if span_start - s - min_space >= keep_length:
919
+ new_parts.append((s, span_start - min_space + 1))
920
+ if e - span_start - keep_length - min_space > keep_length:
921
+ new_parts.append((span_start + length + min_space, e))
922
+ return new_parts
923
+
924
+ parts = [(0, sz)]
925
+ min_length = min(lengths)
926
+ for length in sorted(lengths, reverse=True):
927
+ lens = torch.tensor([e - s for s, e in parts], dtype=torch.int)
928
+ lens[lens < length + min_space] = 0
929
+ l_sum = lens.sum()
930
+ if l_sum == 0:
931
+ break
932
+ probs = lens / l_sum
933
+ c = torch.distributions.categorical.Categorical(probs).sample()
934
+ s, e = parts.pop(c)
935
+ parts.extend(arrange(s, e, length, min_length))
936
+ mask_idc = torch.tensor(mask_idc)
937
+ else:
938
+ min_len = min(lengths)
939
+ if sz - min_len <= num_mask:
940
+ min_len = sz - num_mask - 1
941
+
942
+ mask_idc = torch.randperm(sz - min_len)[:num_mask]
943
+ mask_idc = torch.tensor(
944
+ [mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j])]
945
+ )
946
+
947
+ mask_idcs.append(torch.unique(mask_idc[mask_idc < sz]))
948
+
949
+ min_len = min([len(m) for m in mask_idcs])
950
+ for i, mask_idc in enumerate(mask_idcs):
951
+ if len(mask_idc) > min_len:
952
+ mask_idc = mask_idc[torch.randperm(len(mask_idc))[:min_len].long()]
953
+ mask[i, mask_idc] = True
954
+
955
+ return mask
956
+
957
+
958
+ def _get_padding_mask(input: Tensor, lengths: Tensor) -> Tensor:
959
+ """Generate the padding mask given the padded input and the lengths Tensors.
960
+ Args:
961
+ input (Tensor): The padded Tensor of dimension `[batch, max_len, frequency]`.
962
+ lengths (Tensor): The lengths Tensor of dimension `[batch,]`.
963
+
964
+ Returns:
965
+ (Tensor): The padding mask.
966
+ """
967
+ batch_size, max_len, _ = input.shape
968
+ mask = torch.arange(max_len, device=lengths.device).expand(batch_size, max_len) >= lengths[:, None]
969
+ return mask
970
+
971
+
972
class MaskGenerator(Module):
    """Generate the masks for masked prediction.

    Applies time masking (replacing masked frames with a learned embedding) and
    channel masking (zeroing masked feature channels) via :py:func:`_compute_mask_indices`.

    Args:
        encoder_embed_dim (int): The dimension of the transformer embedding output.
        mask_prob (float): Probability for each token to be chosen as start of the span to be masked.
            This will be multiplied by number of timesteps divided by length of mask span to mask
            approximately this percentage of all elements. However due to overlaps, the actual number
            will be smaller (unless no_overlap is True).
        mask_selection (str): How to choose the mask length.
            Options: [``static``, ``uniform``, ``normal``, ``poisson``].
        mask_other (float): Secondary mask argument (used for more complex distributions).
        mask_length (int): The lengths of the mask.
        no_mask_overlap (bool): Whether to allow masks to overlap.
        mask_min_space (int): Minimum space between spans (if no overlap is enabled).
        mask_channel_prob (float): The probability of replacing a feature with 0.
        mask_channel_selection (str): How to choose the mask length for channel masking.
            Options: [``static``, ``uniform``, ``normal``, ``poisson``].
        mask_channel_other (float): Secondary mask argument for channel masking (used for more complex distributions).
        mask_channel_length (int): The lengths of the mask for channel masking.
        no_mask_channel_overlap (bool): Whether to allow channel masks to overlap.
        mask_channel_min_space (int): Minimum space between spans for channel masking (if no overlap is enabled).
    """

    def __init__(
        self,
        encoder_embed_dim: int,
        mask_prob: float,
        mask_selection: str,
        mask_other: float,
        mask_length: int,
        no_mask_overlap: bool,
        mask_min_space: int,
        mask_channel_prob: float,
        mask_channel_selection: str,
        mask_channel_other: float,
        mask_channel_length: int,
        no_mask_channel_overlap: bool,
        mask_channel_min_space: int,
    ):
        super().__init__()
        self.mask_prob = mask_prob
        self.mask_selection = mask_selection
        self.mask_other = mask_other
        self.mask_length = mask_length
        self.no_mask_overlap = no_mask_overlap
        self.mask_min_space = mask_min_space
        self.mask_channel_prob = mask_channel_prob
        self.mask_channel_selection = mask_channel_selection
        self.mask_channel_other = mask_channel_other
        self.mask_channel_length = mask_channel_length
        self.no_mask_channel_overlap = no_mask_channel_overlap
        self.mask_channel_min_space = mask_channel_min_space
        # Learnable vector substituted for every time-masked frame.
        self.mask_embedding = Parameter(torch.FloatTensor(encoder_embed_dim))
        torch.nn.init.uniform_(self.mask_embedding)

    def forward(self, x: Tensor, padding_mask: Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]:
        """
        Args:
            x (Tensor): The encoded representations after feature extraction module.
                NOTE: modified in place by the masking below.
            padding_mask (Tensor or None): The padding mask of the same dimension as shape,
                which will prevent masking padded elements.

        Returns:
            Tensor: The feature representations after masking.
            Tensor or None: The generated time-mask indices of dimension `[batch, frame]`
                (``None`` when ``mask_prob`` is not positive).
        """
        B, T, C = x.shape
        if self.mask_prob > 0:
            mask_indices = _compute_mask_indices(
                (B, T),
                padding_mask,
                self.mask_prob,
                self.mask_length,
                self.mask_selection,
                self.mask_other,
                min_masks=2,
                no_overlap=self.no_mask_overlap,
                min_space=self.mask_min_space,
            )
            mask_indices = mask_indices.to(x.device)
            # change dtype of mask_embedding to x for mixed-precision training.
            # see https://github.com/pytorch/audio/issues/2847 for details.
            x[mask_indices] = self.mask_embedding.to(x.dtype)
        else:
            mask_indices = None

        if self.mask_channel_prob > 0:
            # Channel masks are sampled per (batch, channel) and broadcast
            # across all T frames before zeroing.
            mask_channel_indices = _compute_mask_indices(
                (B, C),
                None,
                self.mask_channel_prob,
                self.mask_channel_length,
                self.mask_channel_selection,
                self.mask_channel_other,
                no_overlap=self.no_mask_channel_overlap,
                min_space=self.mask_channel_min_space,
            )
            mask_channel_indices = mask_channel_indices.to(x.device).unsqueeze(1).expand(-1, T, -1)
            x[mask_channel_indices] = 0

        return x, mask_indices
1073
+
1074
+
1075
+ def _compute_logits(
1076
+ proj_x: Tensor,
1077
+ target: Tensor,
1078
+ label_embeddings: Parameter,
1079
+ ) -> Tensor:
1080
+ """Compute the logits of the embeddings.
1081
+ Args:
1082
+ proj_x (Tensor): The projected masked representations of dimension `[batch, frame, final_dim]`.
1083
+ target (Tensor): The target Tensor of dimension `[batch, frame, final_dim]`.
1084
+ label_embeddings (Parameter): The trainable embeddings of target of dimension `[num_class, final_dim]`.
1085
+
1086
+ Returns:
1087
+ (Tensor): The logits of the inputs.
1088
+ """
1089
+ logit_temp = 0.1
1090
+ pos = torch.index_select(label_embeddings, 0, target.long())
1091
+ negs = label_embeddings.unsqueeze(1).expand(-1, proj_x.size(0), -1)
1092
+ neg_is_pos = (pos == negs).all(-1)
1093
+ pos = pos.unsqueeze(0)
1094
+ targets = torch.cat([pos, negs], dim=0)
1095
+
1096
+ logits = torch.cosine_similarity(proj_x.float(), targets.float(), dim=-1).type_as(proj_x)
1097
+ logits /= logit_temp
1098
+ if neg_is_pos.any():
1099
+ logits[1:][neg_is_pos] = float("-inf")
1100
+ logits = logits.transpose(0, 1) # (num_x, num_cls+1)
1101
+ return logits
1102
+
1103
+
1104
class LogitGenerator(Module):
    """Generate the logits of masked and unmasked inputs.

    Projects transformer outputs to ``final_dim`` and scores them against
    trainable label embeddings via :py:func:`_compute_logits`.

    Args:
        encoder_embed_dim (int): The dimension of the transformer embedding output.
        num_classes (int): The number of classes in the labels.
        final_dim (int): Project final representations and targets to `final_dim`.
        skip_masked (bool): If True, skip computing losses over masked frames.
        skip_nomask (bool): If True, skip computing losses over unmasked frames.
    """

    def __init__(
        self,
        encoder_embed_dim: int,
        num_classes: int,
        final_dim: int,
        skip_masked: bool,
        skip_nomask: bool,
    ):
        super().__init__()
        # Trainable class embeddings of dimension `[num_classes, final_dim]`.
        self.label_embeddings = Parameter(torch.FloatTensor(num_classes, final_dim))
        torch.nn.init.uniform_(self.label_embeddings)
        self.final_proj = torch.nn.Linear(encoder_embed_dim, final_dim)
        self.skip_masked = skip_masked
        self.skip_nomask = skip_nomask

    def forward(
        self, x: Tensor, label: Tensor, mask_m: Tensor, mask_u: Tensor
    ) -> Tuple[Optional[Tensor], Optional[Tensor]]:
        """
        Args:
            x (Tensor): The feature representation of the last transformer layer.
            label (Tensor): The label Tensor of dimension `[batch, frame]`.
            mask_m (Tensor): The masked indices of dimension `[batch, frame]`.
            mask_u (Tensor): The unmasked indices of dimension `[batch, frame]`.

        Returns:
            Tensor or None: The logits of masked frames. Tensor of dimension
                `[masked_frame, num_classes + 1]`, or ``None`` when ``skip_masked`` is set.
            Tensor or None: The logits of unmasked frames. Tensor of dimension
                `[unmasked_frame, num_classes + 1]`, or ``None`` when ``skip_nomask`` is set.
        """
        proj_x = self.final_proj(x)
        if self.skip_masked:
            logit_m = None
        else:
            # Boolean indexing flattens the selected frames across the batch.
            proj_x_m = proj_x[mask_m]
            label_m = label[mask_m]
            logit_m = _compute_logits(proj_x_m, label_m, self.label_embeddings)

        if self.skip_nomask:
            logit_u = None
        else:
            proj_x_u = proj_x[mask_u]
            label_u = label[mask_u]
            logit_u = _compute_logits(proj_x_u, label_u, self.label_embeddings)
        return logit_m, logit_u
1156
+
1157
+
1158
class GradMultiply(torch.autograd.Function):
    """Autograd function that copies its input unchanged in the forward pass
    and multiplies the incoming gradient by ``scale`` in the backward pass.
    """

    @staticmethod
    def forward(ctx, x, scale):
        # Stash the multiplier for the backward pass.
        ctx.scale = scale
        # Build a new tensor from ``x`` with the same dtype/device.
        # NOTE(review): ``Tensor.new`` is a legacy constructor API; ``x.clone()``
        # is the modern equivalent — confirm before changing.
        res = x.new(x)
        return res

    @staticmethod
    def backward(ctx, grad):
        # Scale the gradient w.r.t. ``x``; ``scale`` itself receives no gradient.
        return grad * ctx.scale, None