torchaudio-2.9.1-cp310-cp310-macosx_11_0_arm64.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- torchaudio/.dylibs/libc++.1.0.dylib +0 -0
- torchaudio/__init__.py +204 -0
- torchaudio/_extension/__init__.py +61 -0
- torchaudio/_extension/utils.py +133 -0
- torchaudio/_internal/__init__.py +10 -0
- torchaudio/_internal/module_utils.py +171 -0
- torchaudio/_torchcodec.py +340 -0
- torchaudio/compliance/__init__.py +5 -0
- torchaudio/compliance/kaldi.py +813 -0
- torchaudio/datasets/__init__.py +47 -0
- torchaudio/datasets/cmuarctic.py +157 -0
- torchaudio/datasets/cmudict.py +186 -0
- torchaudio/datasets/commonvoice.py +86 -0
- torchaudio/datasets/dr_vctk.py +121 -0
- torchaudio/datasets/fluentcommands.py +108 -0
- torchaudio/datasets/gtzan.py +1118 -0
- torchaudio/datasets/iemocap.py +147 -0
- torchaudio/datasets/librilight_limited.py +111 -0
- torchaudio/datasets/librimix.py +133 -0
- torchaudio/datasets/librispeech.py +174 -0
- torchaudio/datasets/librispeech_biasing.py +189 -0
- torchaudio/datasets/libritts.py +168 -0
- torchaudio/datasets/ljspeech.py +107 -0
- torchaudio/datasets/musdb_hq.py +139 -0
- torchaudio/datasets/quesst14.py +136 -0
- torchaudio/datasets/snips.py +157 -0
- torchaudio/datasets/speechcommands.py +183 -0
- torchaudio/datasets/tedlium.py +218 -0
- torchaudio/datasets/utils.py +54 -0
- torchaudio/datasets/vctk.py +143 -0
- torchaudio/datasets/voxceleb1.py +309 -0
- torchaudio/datasets/yesno.py +89 -0
- torchaudio/functional/__init__.py +130 -0
- torchaudio/functional/_alignment.py +128 -0
- torchaudio/functional/filtering.py +1685 -0
- torchaudio/functional/functional.py +2505 -0
- torchaudio/lib/__init__.py +0 -0
- torchaudio/lib/_torchaudio.so +0 -0
- torchaudio/lib/libtorchaudio.so +0 -0
- torchaudio/models/__init__.py +85 -0
- torchaudio/models/_hdemucs.py +1008 -0
- torchaudio/models/conformer.py +293 -0
- torchaudio/models/conv_tasnet.py +330 -0
- torchaudio/models/decoder/__init__.py +64 -0
- torchaudio/models/decoder/_ctc_decoder.py +568 -0
- torchaudio/models/decoder/_cuda_ctc_decoder.py +187 -0
- torchaudio/models/deepspeech.py +84 -0
- torchaudio/models/emformer.py +884 -0
- torchaudio/models/rnnt.py +816 -0
- torchaudio/models/rnnt_decoder.py +339 -0
- torchaudio/models/squim/__init__.py +11 -0
- torchaudio/models/squim/objective.py +326 -0
- torchaudio/models/squim/subjective.py +150 -0
- torchaudio/models/tacotron2.py +1046 -0
- torchaudio/models/wav2letter.py +72 -0
- torchaudio/models/wav2vec2/__init__.py +45 -0
- torchaudio/models/wav2vec2/components.py +1167 -0
- torchaudio/models/wav2vec2/model.py +1579 -0
- torchaudio/models/wav2vec2/utils/__init__.py +7 -0
- torchaudio/models/wav2vec2/utils/import_fairseq.py +213 -0
- torchaudio/models/wav2vec2/utils/import_huggingface.py +134 -0
- torchaudio/models/wav2vec2/wavlm_attention.py +214 -0
- torchaudio/models/wavernn.py +409 -0
- torchaudio/pipelines/__init__.py +102 -0
- torchaudio/pipelines/_source_separation_pipeline.py +109 -0
- torchaudio/pipelines/_squim_pipeline.py +156 -0
- torchaudio/pipelines/_tts/__init__.py +16 -0
- torchaudio/pipelines/_tts/impl.py +385 -0
- torchaudio/pipelines/_tts/interface.py +255 -0
- torchaudio/pipelines/_tts/utils.py +230 -0
- torchaudio/pipelines/_wav2vec2/__init__.py +0 -0
- torchaudio/pipelines/_wav2vec2/aligner.py +87 -0
- torchaudio/pipelines/_wav2vec2/impl.py +1699 -0
- torchaudio/pipelines/_wav2vec2/utils.py +346 -0
- torchaudio/pipelines/rnnt_pipeline.py +380 -0
- torchaudio/transforms/__init__.py +78 -0
- torchaudio/transforms/_multi_channel.py +467 -0
- torchaudio/transforms/_transforms.py +2138 -0
- torchaudio/utils/__init__.py +4 -0
- torchaudio/utils/download.py +89 -0
- torchaudio/version.py +2 -0
- torchaudio-2.9.1.dist-info/METADATA +133 -0
- torchaudio-2.9.1.dist-info/RECORD +86 -0
- torchaudio-2.9.1.dist-info/WHEEL +5 -0
- torchaudio-2.9.1.dist-info/licenses/LICENSE +25 -0
- torchaudio-2.9.1.dist-info/top_level.txt +1 -0
torchaudio/models/__init__.py
@@ -0,0 +1,85 @@
+from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium
+from .conformer import Conformer
+from .conv_tasnet import conv_tasnet_base, ConvTasNet
+from .deepspeech import DeepSpeech
+from .emformer import Emformer
+from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
+from .rnnt_decoder import Hypothesis, RNNTBeamSearch
+from .squim import (
+    squim_objective_base,
+    squim_objective_model,
+    squim_subjective_base,
+    squim_subjective_model,
+    SquimObjective,
+    SquimSubjective,
+)
+from .tacotron2 import Tacotron2
+from .wav2letter import Wav2Letter
+from .wav2vec2 import (
+    hubert_base,
+    hubert_large,
+    hubert_pretrain_base,
+    hubert_pretrain_large,
+    hubert_pretrain_model,
+    hubert_pretrain_xlarge,
+    hubert_xlarge,
+    HuBERTPretrainModel,
+    wav2vec2_base,
+    wav2vec2_large,
+    wav2vec2_large_lv60k,
+    wav2vec2_model,
+    wav2vec2_xlsr_1b,
+    wav2vec2_xlsr_2b,
+    wav2vec2_xlsr_300m,
+    Wav2Vec2Model,
+    wavlm_base,
+    wavlm_large,
+    wavlm_model,
+)
+from .wavernn import WaveRNN
+
+
+__all__ = [
+    "Wav2Letter",
+    "WaveRNN",
+    "ConvTasNet",
+    "conv_tasnet_base",
+    "DeepSpeech",
+    "Wav2Vec2Model",
+    "HuBERTPretrainModel",
+    "wavlm_model",
+    "wavlm_base",
+    "wavlm_large",
+    "wav2vec2_model",
+    "wav2vec2_base",
+    "wav2vec2_large",
+    "wav2vec2_large_lv60k",
+    "hubert_base",
+    "hubert_large",
+    "hubert_xlarge",
+    "hubert_pretrain_model",
+    "hubert_pretrain_base",
+    "hubert_pretrain_large",
+    "hubert_pretrain_xlarge",
+    "wav2vec2_xlsr_300m",
+    "wav2vec2_xlsr_1b",
+    "wav2vec2_xlsr_2b",
+    "Tacotron2",
+    "Conformer",
+    "Emformer",
+    "Hypothesis",
+    "RNNT",
+    "RNNTBeamSearch",
+    "emformer_rnnt_base",
+    "emformer_rnnt_model",
+    "HDemucs",
+    "hdemucs_low",
+    "hdemucs_medium",
+    "hdemucs_high",
+    "squim_objective_base",
+    "squim_objective_model",
+    "squim_subjective_base",
+    "squim_subjective_model",
+    "SquimObjective",
+    "SquimSubjective",
+]
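For orientation, the names re-exported by this __init__.py are what becomes importable from torchaudio.models once the wheel is installed. A minimal usage sketch with two of the re-exported models; the hyperparameter values and tensor shapes below are illustrative, not anything defined in this file:

import torch
from torchaudio.models import Conformer, Wav2Letter

# Conformer encoder: forward takes (batch, time, input_dim) frames plus per-utterance lengths.
conformer = Conformer(
    input_dim=80,
    num_heads=4,
    ffn_dim=128,
    num_layers=4,
    depthwise_conv_kernel_size=31,
)
features = torch.rand(2, 100, 80)      # e.g. 80-dim log-mel frames
lengths = torch.tensor([100, 80])      # valid frames per utterance
encoded, encoded_lengths = conformer(features, lengths)

# Wav2Letter acoustic model operating directly on raw waveforms.
wav2letter = Wav2Letter(num_classes=40, input_type="waveform", num_features=1)
waveform = torch.rand(2, 1, 16000)     # (batch, channel, time)
emissions = wav2letter(waveform)       # (batch, num_classes, frames)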