torchaudio-2.9.0-cp314-cp314-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of torchaudio might be problematic.
- torchaudio/.dylibs/libc++.1.0.dylib +0 -0
- torchaudio/__init__.py +204 -0
- torchaudio/_extension/__init__.py +61 -0
- torchaudio/_extension/utils.py +133 -0
- torchaudio/_internal/__init__.py +10 -0
- torchaudio/_internal/module_utils.py +171 -0
- torchaudio/_torchcodec.py +340 -0
- torchaudio/compliance/__init__.py +5 -0
- torchaudio/compliance/kaldi.py +813 -0
- torchaudio/datasets/__init__.py +47 -0
- torchaudio/datasets/cmuarctic.py +157 -0
- torchaudio/datasets/cmudict.py +186 -0
- torchaudio/datasets/commonvoice.py +86 -0
- torchaudio/datasets/dr_vctk.py +121 -0
- torchaudio/datasets/fluentcommands.py +108 -0
- torchaudio/datasets/gtzan.py +1118 -0
- torchaudio/datasets/iemocap.py +147 -0
- torchaudio/datasets/librilight_limited.py +111 -0
- torchaudio/datasets/librimix.py +133 -0
- torchaudio/datasets/librispeech.py +174 -0
- torchaudio/datasets/librispeech_biasing.py +189 -0
- torchaudio/datasets/libritts.py +168 -0
- torchaudio/datasets/ljspeech.py +107 -0
- torchaudio/datasets/musdb_hq.py +139 -0
- torchaudio/datasets/quesst14.py +136 -0
- torchaudio/datasets/snips.py +157 -0
- torchaudio/datasets/speechcommands.py +183 -0
- torchaudio/datasets/tedlium.py +218 -0
- torchaudio/datasets/utils.py +54 -0
- torchaudio/datasets/vctk.py +143 -0
- torchaudio/datasets/voxceleb1.py +309 -0
- torchaudio/datasets/yesno.py +89 -0
- torchaudio/functional/__init__.py +130 -0
- torchaudio/functional/_alignment.py +128 -0
- torchaudio/functional/filtering.py +1685 -0
- torchaudio/functional/functional.py +2505 -0
- torchaudio/lib/__init__.py +0 -0
- torchaudio/lib/_torchaudio.so +0 -0
- torchaudio/lib/libtorchaudio.so +0 -0
- torchaudio/models/__init__.py +85 -0
- torchaudio/models/_hdemucs.py +1008 -0
- torchaudio/models/conformer.py +293 -0
- torchaudio/models/conv_tasnet.py +330 -0
- torchaudio/models/decoder/__init__.py +64 -0
- torchaudio/models/decoder/_ctc_decoder.py +568 -0
- torchaudio/models/decoder/_cuda_ctc_decoder.py +187 -0
- torchaudio/models/deepspeech.py +84 -0
- torchaudio/models/emformer.py +884 -0
- torchaudio/models/rnnt.py +816 -0
- torchaudio/models/rnnt_decoder.py +339 -0
- torchaudio/models/squim/__init__.py +11 -0
- torchaudio/models/squim/objective.py +326 -0
- torchaudio/models/squim/subjective.py +150 -0
- torchaudio/models/tacotron2.py +1046 -0
- torchaudio/models/wav2letter.py +72 -0
- torchaudio/models/wav2vec2/__init__.py +45 -0
- torchaudio/models/wav2vec2/components.py +1167 -0
- torchaudio/models/wav2vec2/model.py +1579 -0
- torchaudio/models/wav2vec2/utils/__init__.py +7 -0
- torchaudio/models/wav2vec2/utils/import_fairseq.py +213 -0
- torchaudio/models/wav2vec2/utils/import_huggingface.py +134 -0
- torchaudio/models/wav2vec2/wavlm_attention.py +214 -0
- torchaudio/models/wavernn.py +409 -0
- torchaudio/pipelines/__init__.py +102 -0
- torchaudio/pipelines/_source_separation_pipeline.py +109 -0
- torchaudio/pipelines/_squim_pipeline.py +156 -0
- torchaudio/pipelines/_tts/__init__.py +16 -0
- torchaudio/pipelines/_tts/impl.py +385 -0
- torchaudio/pipelines/_tts/interface.py +255 -0
- torchaudio/pipelines/_tts/utils.py +230 -0
- torchaudio/pipelines/_wav2vec2/__init__.py +0 -0
- torchaudio/pipelines/_wav2vec2/aligner.py +87 -0
- torchaudio/pipelines/_wav2vec2/impl.py +1699 -0
- torchaudio/pipelines/_wav2vec2/utils.py +346 -0
- torchaudio/pipelines/rnnt_pipeline.py +380 -0
- torchaudio/transforms/__init__.py +78 -0
- torchaudio/transforms/_multi_channel.py +467 -0
- torchaudio/transforms/_transforms.py +2138 -0
- torchaudio/utils/__init__.py +4 -0
- torchaudio/utils/download.py +89 -0
- torchaudio/version.py +2 -0
- torchaudio-2.9.0.dist-info/LICENSE +25 -0
- torchaudio-2.9.0.dist-info/METADATA +122 -0
- torchaudio-2.9.0.dist-info/RECORD +86 -0
- torchaudio-2.9.0.dist-info/WHEEL +5 -0
- torchaudio-2.9.0.dist-info/top_level.txt +1 -0
torchaudio/utils/download.py
ADDED
@@ -0,0 +1,89 @@
import hashlib
import logging
from os import PathLike
from pathlib import Path
from typing import Union

import torch
from torchaudio._internal import download_url_to_file

_LG = logging.getLogger(__name__)


def _get_local_path(key):
    path = Path(torch.hub.get_dir()) / "torchaudio" / Path(key)
    path.parent.mkdir(parents=True, exist_ok=True)
    return path


def _download(key, path, progress):
    url = f"https://download.pytorch.org/torchaudio/{key}"
    download_url_to_file(url, path, progress=progress)


def _get_hash(path, hash, chunk_size=1028):
    m = hashlib.sha256()
    with open(path, "rb") as file:
        data = file.read(chunk_size)
        while data:
            m.update(data)
            data = file.read(chunk_size)
    return m.hexdigest()


def _download_asset(
    key: str,
    hash: str = "",
    path: Union[str, PathLike] = "",
    *,
    progress: bool = True,
) -> str:
    """Download and store torchaudio assets to local file system.

    If a file exists at the download path, then that path is returned with or without
    hash validation.

    Args:
        key (str): The asset identifier.
        hash (str, optional):
            The value of SHA256 hash of the asset. If provided, it is used to verify
            the downloaded / cached object. If not provided, then no hash validation
            is performed. This means if a file exists at the download path, then the path
            is returned as-is without verifying the identity of the file.
        path (path-like object, optional):
            By default, the downloaded asset is saved in a directory under
            :py:func:`torch.hub.get_dir` and intermediate directories based on the given `key`
            are created.
            This argument can be used to overwrite the target location.
            When this argument is provided, all the intermediate directories have to be
            created beforehand.
        progress (bool): Whether to show progress bar for downloading. Default: ``True``.

    Note:
        Currently the valid key values are the route on ``download.pytorch.org/torchaudio``,
        but this is an implementation detail.

    Returns:
        str: The path to the asset on the local file system.
    """
    path = path or _get_local_path(key)

    if path.exists():
        _LG.info("The local file (%s) exists. Skipping the download.", path)
    else:
        _LG.info("Downloading %s to %s", key, path)
        _download(key, path, progress=progress)

    if hash:
        _LG.info("Verifying the hash value.")
        digest = _get_hash(path, hash)

        if digest != hash:
            raise ValueError(
                f"The hash value of the downloaded file ({path}), '{digest}' does not match "
                f"the provided hash value, '{hash}'."
            )

        _LG.info("Hash validated.")

    return str(path)
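For reference, the cache-then-verify pattern used by `_download_asset` above can be reproduced with the standard library alone. A minimal sketch, assuming a local file; the path and expected digest below are hypothetical placeholders, not values shipped in this wheel:

```python
import hashlib
from pathlib import Path

def sha256_hex(path: Path, chunk_size: int = 8192) -> str:
    # Stream the file through SHA-256 in chunks, like _get_hash above.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical asset and expected digest, for illustration only.
asset = Path("some_asset.pt")
expected = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

if asset.exists() and sha256_hex(asset) != expected:
    raise ValueError(f"Hash mismatch for {asset}")
```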
torchaudio-2.9.0.dist-info/LICENSE
ADDED
@@ -0,0 +1,25 @@
BSD 2-Clause License

Copyright (c) 2017 Facebook Inc. (Soumith Chintala),
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
torchaudio-2.9.0.dist-info/METADATA
ADDED
@@ -0,0 +1,122 @@
Metadata-Version: 2.1
Name: torchaudio
Version: 2.9.0
Summary: An audio package for PyTorch
Home-page: https://github.com/pytorch/audio
Author: Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough, Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
Author-email: soumith@pytorch.org
Maintainer: Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
Maintainer-email: moto@meta.com
Classifier: Environment :: Plugins
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: POSIX
Classifier: Programming Language :: C++
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: 3.14
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Topic :: Multimedia :: Sound/Audio
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: torch ==2.9.0

torchaudio: an audio library for PyTorch
========================================

[](https://pytorch.org/audio/main/)
[](https://anaconda.org/pytorch/torchaudio)
[](https://anaconda.org/pytorch/torchaudio)

> [!NOTE]
> **We have transitioned TorchAudio into a maintenance phase. This process removed some
> user-facing features, which were deprecated in TorchAudio 2.8 and removed in 2.9.
> Our main goals were to reduce redundancies with the rest of the PyTorch ecosystem,
> make it easier to maintain, and create a version of TorchAudio that is more tightly
> scoped to its strengths: processing audio data for ML. Please see
> [our community message](https://github.com/pytorch/audio/issues/3902) for more details.**

The aim of torchaudio is to apply [PyTorch](https://github.com/pytorch/pytorch) to
the audio domain. By supporting PyTorch, torchaudio follows the same philosophy
of providing strong GPU acceleration, having a focus on trainable features through
the autograd system, and having consistent style (tensor names and dimension names).
Therefore, it is primarily a machine learning library and not a general signal
processing library. The benefits of PyTorch can be seen in torchaudio through
having all the computations go through PyTorch operations, which makes it easy
to use and feel like a natural extension.

- [Dataloaders for common audio datasets](http://pytorch.org/audio/main/datasets.html)
- Audio and speech processing functions
  - [forced_align](https://pytorch.org/audio/main/generated/torchaudio.functional.forced_align.html)
- Common audio transforms
  - [Spectrogram, AmplitudeToDB, MelScale, MelSpectrogram, MFCC, MuLawEncoding, MuLawDecoding, Resample](http://pytorch.org/audio/main/transforms.html)
- Compliance interfaces: run code using PyTorch that aligns with other libraries
  - [Kaldi: spectrogram, fbank, mfcc](https://pytorch.org/audio/main/compliance.kaldi.html)

Installation
------------

Please refer to https://pytorch.org/audio/main/installation.html for the installation and build process of TorchAudio.

API Reference
-------------

The API reference is located here: http://pytorch.org/audio/main/

Contributing Guidelines
-----------------------

Please refer to [CONTRIBUTING.md](./CONTRIBUTING.md).

Citation
--------

If you find this package useful, please cite it as:

```bibtex
@article{yang2021torchaudio,
  title={TorchAudio: Building Blocks for Audio and Speech Processing},
  author={Yao-Yuan Yang and Moto Hira and Zhaoheng Ni and Anjali Chourdia and Artyom Astafurov and Caroline Chen and Ching-Feng Yeh and Christian Puhrsch and David Pollack and Dmitriy Genzel and Donny Greenberg and Edward Z. Yang and Jason Lian and Jay Mahadeokar and Jeff Hwang and Ji Chen and Peter Goldsborough and Prabhat Roy and Sean Narenthiran and Shinji Watanabe and Soumith Chintala and Vincent Quenneville-Bélair and Yangyang Shi},
  journal={arXiv preprint arXiv:2110.15018},
  year={2021}
}
```

```bibtex
@misc{hwang2023torchaudio,
  title={TorchAudio 2.1: Advancing speech recognition, self-supervised learning, and audio processing components for PyTorch},
  author={Jeff Hwang and Moto Hira and Caroline Chen and Xiaohui Zhang and Zhaoheng Ni and Guangzhi Sun and Pingchuan Ma and Ruizhe Huang and Vineel Pratap and Yuekai Zhang and Anurag Kumar and Chin-Yun Yu and Chuang Zhu and Chunxi Liu and Jacob Kahn and Mirco Ravanelli and Peng Sun and Shinji Watanabe and Yangyang Shi and Yumeng Tao and Robin Scheibler and Samuele Cornell and Sean Kim and Stavros Petridis},
  year={2023},
  eprint={2310.17864},
  archivePrefix={arXiv},
  primaryClass={eess.AS}
}
```

Disclaimer on Datasets
----------------------

This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license.

If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community!

Pre-trained Model License
-------------------------

The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the dataset used for training. It is your responsibility to determine whether you have permission to use the models for your use case.

For instance, the SquimSubjective model is released under the Creative Commons Attribution Non Commercial 4.0 International (CC-BY-NC 4.0) license. See [the link](https://zenodo.org/record/4660670#.ZBtWPOxuerN) for additional details.

Other pre-trained models that have different licenses are noted in the documentation. Please check out the [documentation page](https://pytorch.org/audio/main/).
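The transforms and datasets referenced in this README correspond to the `torchaudio/transforms` and `torchaudio/datasets` modules packaged in this wheel. A minimal usage sketch, assuming a 16 kHz mono waveform; the random input and parameter values are illustrative, not taken from the package:

```python
import torch
import torchaudio

# Illustrative 1-second, 16 kHz mono signal.
sample_rate = 16000
waveform = torch.randn(1, sample_rate)

# Two of the transforms named above: MelSpectrogram and MFCC.
mel_transform = torchaudio.transforms.MelSpectrogram(sample_rate=sample_rate, n_mels=64)
mfcc_transform = torchaudio.transforms.MFCC(sample_rate=sample_rate, n_mfcc=13)

mel = mel_transform(waveform)    # shape: (channel, n_mels, time)
mfcc = mfcc_transform(waveform)  # shape: (channel, n_mfcc, time)
print(mel.shape, mfcc.shape)
```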
torchaudio-2.9.0.dist-info/RECORD
ADDED
@@ -0,0 +1,86 @@
torchaudio-2.9.0.dist-info/RECORD,,
torchaudio-2.9.0.dist-info/LICENSE,sha256=k6WIYahYzBCOa2uDPgjnbosqZjOeSoAHyKWowf-cQNY,1338
torchaudio-2.9.0.dist-info/WHEEL,sha256=AIWYNMd7991pcMFgDWgbcuDB5q33MR8gQ54NDz3i1RU,109
torchaudio-2.9.0.dist-info/top_level.txt,sha256=mPKWMIRWWW2JwbJN6wRckeN1gpbjhifapAF0Z9t7SMo,11
torchaudio-2.9.0.dist-info/METADATA,sha256=ygAM9_HG0s1iYrA3X6SLUDTX-QkiDZm6H91eEFlafgI,6672
torchaudio/_torchcodec.py,sha256=Z1TpONctbL80DufuWhLRj4dC0rVhjKu6hOYeglcLwvU,13424
torchaudio/version.py,sha256=WTITTd5_jGUXzppoWPQ1qWzgXa7LiuTEZCpL_v8XLNk,79
torchaudio/__init__.py,sha256=8OB3EPGCViF7LgBWy_bUyZUF6HJUIpbTI8ouRGwn6lU,7878
torchaudio/_internal/__init__.py,sha256=gjU8g9HhVd9hHrHXJM0xOlZL6cT8ktO60MN8RHI6ZbA,241
torchaudio/_internal/module_utils.py,sha256=eosQSGtN5WhHhATJGBWJIGUM_nvtgLPRkQ8BH_Zd53o,5229
torchaudio/datasets/speechcommands.py,sha256=cLSgiVYlQjEOuYPpFeAtcXSGirraH4IMoP8p9WIvUoY,7481
torchaudio/datasets/librilight_limited.py,sha256=fAwpX0hEMze5aV57BP7rjBLwRiZa3Aje_NXi_3o16wA,4179
torchaudio/datasets/dr_vctk.py,sha256=Km4-tKllAgnOKCuq66YRWhTlNWmC7D0Xz3dAttRRGSo,4377
torchaudio/datasets/cmudict.py,sha256=9OEpNDYpyqeEyinAnyGIU8FampDj7ziSOHRwJLIlq2M,5990
torchaudio/datasets/gtzan.py,sha256=I5dRP_QGuQ1joXWRwZwtvpwi22uZTb8QZm9Mr2W55Mg,24357
torchaudio/datasets/cmuarctic.py,sha256=2e5Oh_jDHRs8ORhNONsD9NhI_OfQSHDLQAM-tWpgZ-U,7081
torchaudio/datasets/musdb_hq.py,sha256=TYKjpat6JKr9bkFqUecu7_hRdshRfQP2UbknaYR3Q0U,5075
torchaudio/datasets/snips.py,sha256=WaYUknGFM3rnLklOj5ZYHSX5mhlf_Ce4p3LBZdA9yJc,5008
torchaudio/datasets/commonvoice.py,sha256=9khedUCmdEkCKPU6_r8VWz6I2VdJokatuziZ6BxJMZs,2763
torchaudio/datasets/librispeech_biasing.py,sha256=d-02tyrXI-CSGbXBFYFcnM_yT8WSGABHfpNiFxyadL0,6958
torchaudio/datasets/__init__.py,sha256=taRr3duDaEK1Pfzj9N1dFuZpXfy8e4uFItcJiRLAQwQ,1171
torchaudio/datasets/libritts.py,sha256=EtWOoCDz7_qGLZF5YcZfnHaLxH4Y8QJCnopafLiqFno,5870
torchaudio/datasets/voxceleb1.py,sha256=9vU0ftB4-2usO8ZiEUKR_IQTEdHhA0M8l9scXCNehnw,11725
torchaudio/datasets/tedlium.py,sha256=a8Hf2QvOki7_chgXcMAFMk-piTjodktfnc3HRbUVJkU,8698
torchaudio/datasets/utils.py,sha256=P6nckh2YrAfOPMphHlxyfI-HBmNg39DTlxQ8-asG4MY,1703
torchaudio/datasets/ljspeech.py,sha256=92NeLQsC1iKpqfiMkKKbcJDpaYdZKVdVEBQJze1wmxY,3494
torchaudio/datasets/yesno.py,sha256=4sgfMeSxz8HaRDk6A2UIFP-20q29MwEO_r8DoEtfbvE,3026
torchaudio/datasets/fluentcommands.py,sha256=u3tkO4-AAaTWdbRQi6lIvad4x2plZgXM39KljGtmRsw,3245
torchaudio/datasets/librispeech.py,sha256=zkzJFWchWs4AktYAI-ghmWH4ZeJ84C0uDo9E1_pTgSI,6308
torchaudio/datasets/iemocap.py,sha256=X_WCoXOzRqcWRRRoUtY0AlD9SJcUUOACIcgbV0irt48,4930
torchaudio/datasets/librimix.py,sha256=VtKOhf6VJc1ysWCvUvh0SbtjOkXJChmBM_BhoSkg_2A,5116
torchaudio/datasets/quesst14.py,sha256=QyGd4fMS820ATbP8YgBtu7bSSK09pw5RZklsPJ8Jf0Y,4455
torchaudio/datasets/vctk.py,sha256=twR_n8LyQcT8A_HrJoMx3RkaVrRXXZAnIVU1d0E0npQ,5699
torchaudio/pipelines/_source_separation_pipeline.py,sha256=ogWakvaOv6OegmREcbagvfIm0jNWjzEtsdMYTialRNk,4225
torchaudio/pipelines/__init__.py,sha256=Xy8NmInKwTcNBHwLTTjHjrfczRLuQq8a67ENt1OTVXM,2745
torchaudio/pipelines/rnnt_pipeline.py,sha256=56nQnCcjY4xewDqXR1Rkrh_hyoK42CsYumpU8mUNs1w,13753
torchaudio/pipelines/_squim_pipeline.py,sha256=852SYXqUZDgTPegL7LqgVQr0PXG94da_DTDF2bwDhVE,6282
torchaudio/pipelines/_wav2vec2/aligner.py,sha256=pIWRgQ-kdYUxtL8bdc0qk9wBjwRrHY1uSWL3L4e2vxs,2709
torchaudio/pipelines/_wav2vec2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
torchaudio/pipelines/_wav2vec2/utils.py,sha256=Q8_fWOR2JDnHu0TTRmHzRjI3BOJa0hGIAl0cjtALgsQ,6971
torchaudio/pipelines/_wav2vec2/impl.py,sha256=zdXFjytJO5MvnB-3aygzUUFKxCTkQGU_OX_rhUh9c0k,65561
torchaudio/pipelines/_tts/interface.py,sha256=yUaS0UK3PTRruYXRWFil7lAhr-1iYiyBaDBLmEnJPUQ,10224
torchaudio/pipelines/_tts/__init__.py,sha256=PP7l8XzVURqelwuMJFgfOCv4fvzZunDiy90ZQlRkv7g,426
torchaudio/pipelines/_tts/utils.py,sha256=KGrFoetCZ4l4FJkINFptAc8Pvrbo9e4QQhCIMCp8NYY,4810
torchaudio/pipelines/_tts/impl.py,sha256=Tig4_5sITJADwxN5eZGek7Ath_-e3sV8CTM5t6UpeUU,15374
torchaudio/utils/download.py,sha256=gZA7CijUoAu3Q0Qd6dKpFQAEjcdnxR6xOT59lTgEIOo,2883
torchaudio/utils/__init__.py,sha256=adAdfYm9DJBC2JXxRCTrjxOUU1vKJ9w3rFke-DzKKqU,70
torchaudio/models/wav2letter.py,sha256=KNcq4p0qZG2Bwfdakv7YwLCvi_yGT-qB4fJwGMuFQhg,3278
torchaudio/models/emformer.py,sha256=ncDeEcYegUmIKQoDBoufUhVWj4dYpZAXxLX0qmEqt1A,37766
torchaudio/models/tacotron2.py,sha256=FimYhGSI8FKwWb87CLk4h3yKWatCU2HvFmU1t5WUn4E,45914
torchaudio/models/conv_tasnet.py,sha256=v-DI_Ej9FCBBbSH-Spkh3tzq8rkBhbQNA-Wp52Uf32E,12540
torchaudio/models/rnnt_decoder.py,sha256=IwlDsuw1SA-uCRrXGMBqm05auGFSha2bZ-8BOImnK0c,12839
torchaudio/models/__init__.py,sha256=BNMNGuwpJAFRsdtwHYQ6slGClkrUTu31_7mXh7FjeV4,1995
torchaudio/models/deepspeech.py,sha256=kQW3B6YcjYuq7xRzWjRJFGr7ZNraY9gMYDTxII7Cgtg,2746
torchaudio/models/_hdemucs.py,sha256=VPnQ73lA9lfAxRjZ85NCGJYP36mPNwTjS-TU4qelu_k,38242
torchaudio/models/conformer.py,sha256=5IceU-jcZKofkHTTqRKoytubQ75MzZPrPlfkLsIlxeA,10068
torchaudio/models/wavernn.py,sha256=5xUyao5g69jRXX4ReNi4mP_aTSIonJPP6XcPrqKybEk,15446
torchaudio/models/rnnt.py,sha256=jz66nwDd1qGT6KQR1lbA_urPktygewhm0FH66T7P3Ek,35541
torchaudio/models/squim/objective.py,sha256=gvUasz7RpqgKeGf04yHUotshSIzH3KzjW90-iHeDo2g,12281
torchaudio/models/squim/subjective.py,sha256=N00kILSPm0akWyNsrNYKmHgZmooo8gbyUm5IVLf7bx8,5797
torchaudio/models/squim/__init__.py,sha256=b98nAaL28Q4w3lrqd_6wUd0An-xNhhJn4Tj8oZlzQnc,346
torchaudio/models/decoder/_ctc_decoder.py,sha256=AmLQAcm4Q4bFPqnq-SF7Lpvg2QPK88xyio8ol_OJjvU,20086
torchaudio/models/decoder/__init__.py,sha256=HxU2Bgyea0No8SORRfxgMZNwwEDTrjlT3bDW_GxzpTU,1899
torchaudio/models/decoder/_cuda_ctc_decoder.py,sha256=xFrj1cTEsS-MxAO5Vgdutcb3kTb7Jv-OFhS6cmfFKhA,7186
torchaudio/models/wav2vec2/wavlm_attention.py,sha256=1DU_pkoLCeHQwSF4lJ06cez0PsMVoXNxiYKP0Yv0qFQ,10844
torchaudio/models/wav2vec2/__init__.py,sha256=WlafukV6GwuSNh0CZifrYUt4V5l59kjvGX7AZNonjfk,927
torchaudio/models/wav2vec2/model.py,sha256=Z2VN6KbDOOdq5JtP7lxPQebwYqsxKms1Eu4IjDJtZaQ,60092
torchaudio/models/wav2vec2/components.py,sha256=DRmW-GHYf-JReCg_0l1ovNWJBnAavePO3S2vPY-1ze4,47077
torchaudio/models/wav2vec2/utils/__init__.py,sha256=qmMbz4HAN5kEEyl4cSGm_JQZI47beyh4witydPC_qns,181
torchaudio/models/wav2vec2/utils/import_huggingface.py,sha256=1nVCipp-lOUAyl_-P103DWLUeTOZi9X_ffX93bOXxEk,5946
torchaudio/models/wav2vec2/utils/import_fairseq.py,sha256=oCwG6qpG0bCXue2V56fjDcC8cA2rgy4b3O_nu_FI9ZY,9198
torchaudio/compliance/__init__.py,sha256=hhNObUS0c-fS-VMudM7zl3-CvupvCDmESlikntSMn5g,48
torchaudio/compliance/kaldi.py,sha256=XL6hpYTd6nSPb2imIdeU4TM06I2fqh1AmG968y8ZbSk,36666
torchaudio/.dylibs/libc++.1.0.dylib,sha256=wuXGTI0pkLyitB9973AjISe0ogS6pBg4wL7QnP8XuAA,1173408
torchaudio/transforms/__init__.py,sha256=8_47qPRjXNg332f2kcNP_T5UXCn6jQmUUMkIgyIByjY,1398
torchaudio/transforms/_transforms.py,sha256=i-xEARqCfnaDk9b0yzmYkPo9Gg1N1iKvZiLSMdX14-Q,86919
torchaudio/transforms/_multi_channel.py,sha256=GZ2rrwFt2KtSG7At7kS9Bqh1KmYYw0HwcUnEjc-AWr8,22221
torchaudio/lib/libtorchaudio.so,sha256=jh_q1xvbwMDgP8bLFEuy1aJVedmTtsdSJM0qkxjThKU,496800
torchaudio/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
torchaudio/lib/_torchaudio.so,sha256=-PbOTXZBl9Ai3DGsosQrIfCKhZd-vz9eTz7cUgUA5w0,214144
torchaudio/_extension/__init__.py,sha256=A8oH7eF2Fx4d68LddkFE1Ylq3AE3X2sgZdXjvaMEdjQ,1905
torchaudio/_extension/utils.py,sha256=UQCObmKAsgdHhXU2dQYYxyFXwfdTsBO9bnrQmpQNN_I,4926
torchaudio/functional/filtering.py,sha256=rML8MismfehSeglw65kUkfugoP6XDtWcs_XhCl6aJM4,62325
torchaudio/functional/__init__.py,sha256=_5eT3FZFO6GXmKqFkPY4c_w7F7Isqnd8CTP2FdMxfVM,2451
torchaudio/functional/_alignment.py,sha256=NveQ74x8PmleuB-Ka9eEYYyshbV7nYc0g-Tu3NGHdz0,4739
torchaudio/functional/functional.py,sha256=5l-07BLVAs1PNU8NM2CPV_GTnq3V8nbV9tI7t0v79Y4,94731
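Each RECORD line above has the form `path,sha256=<digest>,size`, where the digest is the file's SHA-256 hash encoded as unpadded URL-safe base64, per the wheel RECORD convention. A sketch of how one entry could be recomputed, assuming the wheel has been unpacked into the current directory; the commented expected value is copied from the RECORD above:

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    # SHA-256 digest, URL-safe base64 with '=' padding stripped, as used in wheel RECORD files.
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# Example check against the entry for torchaudio/version.py listed above.
# print(record_hash("torchaudio/version.py"))
# Expected: sha256=WTITTd5_jGUXzppoWPQ1qWzgXa7LiuTEZCpL_v8XLNk
```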
torchaudio-2.9.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
torchaudio