torchaudio-2.7.0-cp312-cp312-win_amd64.whl → torchaudio-2.8.0-cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of torchaudio might be problematic; see the release notes and the deprecation notice below for more details.

Files changed (52)
  1. torchaudio/__init__.py +16 -5
  2. torchaudio/_backend/sox.py +2 -2
  3. torchaudio/_backend/utils.py +33 -0
  4. torchaudio/_internal/module_utils.py +59 -10
  5. torchaudio/_torchcodec.py +352 -0
  6. torchaudio/backend/no_backend.py +2 -2
  7. torchaudio/backend/soundfile_backend.py +2 -2
  8. torchaudio/backend/sox_io_backend.py +2 -2
  9. torchaudio/functional/__init__.py +6 -1
  10. torchaudio/functional/functional.py +7 -3
  11. torchaudio/io/__init__.py +10 -3
  12. torchaudio/kaldi_io.py +6 -0
  13. torchaudio/lib/_torchaudio.pyd +0 -0
  14. torchaudio/lib/libtorchaudio.pyd +0 -0
  15. torchaudio/models/decoder/__init__.py +7 -1
  16. torchaudio/pipelines/_tts/utils.py +3 -1
  17. torchaudio/prototype/datasets/musan.py +2 -1
  18. torchaudio/prototype/functional/_dsp.py +8 -0
  19. torchaudio/prototype/functional/_rir.py +3 -0
  20. torchaudio/prototype/functional/functional.py +3 -0
  21. torchaudio/prototype/models/__init__.py +4 -1
  22. torchaudio/prototype/models/_conformer_wav2vec2.py +7 -0
  23. torchaudio/prototype/models/_emformer_hubert.py +4 -0
  24. torchaudio/prototype/models/conv_emformer.py +4 -0
  25. torchaudio/prototype/models/hifi_gan.py +6 -0
  26. torchaudio/prototype/models/rnnt.py +6 -0
  27. torchaudio/prototype/models/rnnt_decoder.py +3 -0
  28. torchaudio/prototype/pipelines/__init__.py +11 -2
  29. torchaudio/prototype/pipelines/_vggish/__init__.py +5 -1
  30. torchaudio/prototype/pipelines/_vggish/_vggish_impl.py +4 -1
  31. torchaudio/prototype/pipelines/_vggish/_vggish_pipeline.py +3 -2
  32. torchaudio/prototype/pipelines/hifigan_pipeline.py +5 -0
  33. torchaudio/prototype/transforms/_transforms.py +6 -1
  34. torchaudio/sox_effects/sox_effects.py +4 -1
  35. torchaudio/transforms/__init__.py +3 -1
  36. torchaudio/transforms/_transforms.py +3 -2
  37. torchaudio/utils/download.py +2 -0
  38. torchaudio/utils/sox_utils.py +19 -0
  39. torchaudio/version.py +2 -2
  40. {torchaudio-2.7.0.dist-info → torchaudio-2.8.0.dist-info}/METADATA +136 -124
  41. {torchaudio-2.7.0.dist-info → torchaudio-2.8.0.dist-info}/RECORD +145 -144
  42. {torchaudio-2.7.0.dist-info → torchaudio-2.8.0.dist-info}/WHEEL +1 -1
  43. torio/io/_streaming_media_decoder.py +0 -1
  44. torio/lib/_torio_ffmpeg4.pyd +0 -0
  45. torio/lib/_torio_ffmpeg5.pyd +0 -0
  46. torio/lib/_torio_ffmpeg6.pyd +0 -0
  47. torio/lib/libtorio_ffmpeg4.pyd +0 -0
  48. torio/lib/libtorio_ffmpeg5.pyd +0 -0
  49. torio/lib/libtorio_ffmpeg6.pyd +0 -0
  50. torio/utils/ffmpeg_utils.py +28 -0
  51. {torchaudio-2.7.0.dist-info → torchaudio-2.8.0.dist-info/licenses}/LICENSE +0 -0
  52. {torchaudio-2.7.0.dist-info → torchaudio-2.8.0.dist-info}/top_level.txt +0 -0
@@ -30,7 +30,9 @@ def _get_hash(path, hash, chunk_size=1028):
30
30
  data = file.read(chunk_size)
31
31
  return m.hexdigest()
32
32
 
33
+ from torchaudio._internal.module_utils import dropping_support
33
34
 
35
+ @dropping_support
34
36
  def download_asset(
35
37
  key: str,
36
38
  hash: str = "",
@@ -1,5 +1,15 @@
1
1
  """Module to change the configuration of libsox, which is used by I/O functions like
2
2
  :py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
3
+
4
+ .. warning::
5
+ Starting with version 2.8, we are refactoring TorchAudio to transition it
6
+ into a maintenance phase. As a result:
7
+
8
+ - Some APIs are deprecated in 2.8 and will be removed in 2.9.
9
+ - The decoding and encoding capabilities of PyTorch for both audio and video
10
+ are being consolidated into TorchCodec.
11
+
12
+ Please see https://github.com/pytorch/audio/issues/3902 for more information.
3
13
  """
4
14
 
5
15
  from typing import Dict, List
@@ -8,7 +18,9 @@ import torchaudio
8
18
 
9
19
  sox_ext = torchaudio._extension.lazy_import_sox_ext()
10
20
 
21
+ from torchaudio._internal.module_utils import dropping_support
11
22
 
23
+ @dropping_support
12
24
  def set_seed(seed: int):
13
25
  """Set libsox's PRNG
14
26
 
@@ -21,6 +33,7 @@ def set_seed(seed: int):
21
33
  sox_ext.set_seed(seed)
22
34
 
23
35
 
36
+ @dropping_support
24
37
  def set_verbosity(verbosity: int):
25
38
  """Set libsox's verbosity
26
39
 
@@ -38,6 +51,7 @@ def set_verbosity(verbosity: int):
38
51
  sox_ext.set_verbosity(verbosity)
39
52
 
40
53
 
54
+ @dropping_support
41
55
  def set_buffer_size(buffer_size: int):
42
56
  """Set buffer size for sox effect chain
43
57
 
@@ -50,6 +64,7 @@ def set_buffer_size(buffer_size: int):
50
64
  sox_ext.set_buffer_size(buffer_size)
51
65
 
52
66
 
67
+ @dropping_support
53
68
  def set_use_threads(use_threads: bool):
54
69
  """Set multithread option for sox effect chain
55
70
 
@@ -63,6 +78,7 @@ def set_use_threads(use_threads: bool):
63
78
  sox_ext.set_use_threads(use_threads)
64
79
 
65
80
 
81
+ @dropping_support
66
82
  def list_effects() -> Dict[str, str]:
67
83
  """List the available sox effect names
68
84
 
@@ -72,6 +88,7 @@ def list_effects() -> Dict[str, str]:
72
88
  return dict(sox_ext.list_effects())
73
89
 
74
90
 
91
+ @dropping_support
75
92
  def list_read_formats() -> List[str]:
76
93
  """List the supported audio formats for read
77
94
 
@@ -81,6 +98,7 @@ def list_read_formats() -> List[str]:
81
98
  return sox_ext.list_read_formats()
82
99
 
83
100
 
101
+ @dropping_support
84
102
  def list_write_formats() -> List[str]:
85
103
  """List the supported audio formats for write
86
104
 
@@ -90,6 +108,7 @@ def list_write_formats() -> List[str]:
90
108
  return sox_ext.list_write_formats()
91
109
 
92
110
 
111
+ @dropping_support
93
112
  def get_buffer_size() -> int:
94
113
  """Get buffer size for sox effect chain
95
114
 
torchaudio/version.py CHANGED
@@ -1,2 +1,2 @@
1
- __version__ = '2.7.0+cpu'
2
- git_version = '654fee8fd17784271be1637eac1293fd834b4e9a'
1
+ __version__ = '2.8.0+cpu'
2
+ git_version = '6e1c7fe9ff6d82b8665d0a46d859d3357d2ebaaa'
@@ -1,124 +1,136 @@
1
- Metadata-Version: 2.2
2
- Name: torchaudio
3
- Version: 2.7.0
4
- Summary: An audio package for PyTorch
5
- Home-page: https://github.com/pytorch/audio
6
- Author: Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough, Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
7
- Author-email: soumith@pytorch.org
8
- Maintainer: Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
9
- Maintainer-email: moto@meta.com
10
- Classifier: Environment :: Plugins
11
- Classifier: Intended Audience :: Developers
12
- Classifier: Intended Audience :: Science/Research
13
- Classifier: License :: OSI Approved :: BSD License
14
- Classifier: Operating System :: MacOS :: MacOS X
15
- Classifier: Operating System :: Microsoft :: Windows
16
- Classifier: Operating System :: POSIX
17
- Classifier: Programming Language :: C++
18
- Classifier: Programming Language :: Python :: 3.9
19
- Classifier: Programming Language :: Python :: 3.10
20
- Classifier: Programming Language :: Python :: 3.11
21
- Classifier: Programming Language :: Python :: 3.12
22
- Classifier: Programming Language :: Python :: 3.13
23
- Classifier: Programming Language :: Python :: Implementation :: CPython
24
- Classifier: Topic :: Multimedia :: Sound/Audio
25
- Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
26
- Description-Content-Type: text/markdown
27
- License-File: LICENSE
28
- Requires-Dist: torch==2.7.0
29
- Dynamic: author
30
- Dynamic: author-email
31
- Dynamic: classifier
32
- Dynamic: description
33
- Dynamic: description-content-type
34
- Dynamic: home-page
35
- Dynamic: maintainer
36
- Dynamic: maintainer-email
37
- Dynamic: requires-dist
38
- Dynamic: summary
39
-
40
- torchaudio: an audio library for PyTorch
41
- ========================================
42
-
43
- [![Documentation](https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchaudio%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v)](https://pytorch.org/audio/main/)
44
- [![Anaconda Badge](https://anaconda.org/pytorch/torchaudio/badges/downloads.svg)](https://anaconda.org/pytorch/torchaudio)
45
- [![Anaconda-Server Badge](https://anaconda.org/pytorch/torchaudio/badges/platforms.svg)](https://anaconda.org/pytorch/torchaudio)
46
-
47
- ![TorchAudio Logo](docs/source/_static/img/logo.png)
48
-
49
- The aim of torchaudio is to apply [PyTorch](https://github.com/pytorch/pytorch) to
50
- the audio domain. By supporting PyTorch, torchaudio follows the same philosophy
51
- of providing strong GPU acceleration, having a focus on trainable features through
52
- the autograd system, and having consistent style (tensor names and dimension names).
53
- Therefore, it is primarily a machine learning library and not a general signal
54
- processing library. The benefits of PyTorch can be seen in torchaudio through
55
- having all the computations be through PyTorch operations which makes it easy
56
- to use and feel like a natural extension.
57
-
58
- - [Support audio I/O (Load files, Save files)](http://pytorch.org/audio/main/)
59
- - Load a variety of audio formats, such as `wav`, `mp3`, `ogg`, `flac`, `opus`, `sphere`, into a torch Tensor using SoX
60
- - [Kaldi (ark/scp)](http://pytorch.org/audio/main/kaldi_io.html)
61
- - [Dataloaders for common audio datasets](http://pytorch.org/audio/main/datasets.html)
62
- - Audio and speech processing functions
63
- - [forced_align](https://pytorch.org/audio/main/generated/torchaudio.functional.forced_align.html)
64
- - Common audio transforms
65
- - [Spectrogram, AmplitudeToDB, MelScale, MelSpectrogram, MFCC, MuLawEncoding, MuLawDecoding, Resample](http://pytorch.org/audio/main/transforms.html)
66
- - Compliance interfaces: Run code using PyTorch that align with other libraries
67
- - [Kaldi: spectrogram, fbank, mfcc](https://pytorch.org/audio/main/compliance.kaldi.html)
68
-
69
- Installation
70
- ------------
71
-
72
- Please refer to https://pytorch.org/audio/main/installation.html for installation and build process of TorchAudio.
73
-
74
-
75
- API Reference
76
- -------------
77
-
78
- API Reference is located here: http://pytorch.org/audio/main/
79
-
80
- Contributing Guidelines
81
- -----------------------
82
-
83
- Please refer to [CONTRIBUTING.md](./CONTRIBUTING.md)
84
-
85
- Citation
86
- --------
87
-
88
- If you find this package useful, please cite as:
89
-
90
- ```bibtex
91
- @article{yang2021torchaudio,
92
- title={TorchAudio: Building Blocks for Audio and Speech Processing},
93
- author={Yao-Yuan Yang and Moto Hira and Zhaoheng Ni and Anjali Chourdia and Artyom Astafurov and Caroline Chen and Ching-Feng Yeh and Christian Puhrsch and David Pollack and Dmitriy Genzel and Donny Greenberg and Edward Z. Yang and Jason Lian and Jay Mahadeokar and Jeff Hwang and Ji Chen and Peter Goldsborough and Prabhat Roy and Sean Narenthiran and Shinji Watanabe and Soumith Chintala and Vincent Quenneville-Bélair and Yangyang Shi},
94
- journal={arXiv preprint arXiv:2110.15018},
95
- year={2021}
96
- }
97
- ```
98
-
99
- ```bibtex
100
- @misc{hwang2023torchaudio,
101
- title={TorchAudio 2.1: Advancing speech recognition, self-supervised learning, and audio processing components for PyTorch},
102
- author={Jeff Hwang and Moto Hira and Caroline Chen and Xiaohui Zhang and Zhaoheng Ni and Guangzhi Sun and Pingchuan Ma and Ruizhe Huang and Vineel Pratap and Yuekai Zhang and Anurag Kumar and Chin-Yun Yu and Chuang Zhu and Chunxi Liu and Jacob Kahn and Mirco Ravanelli and Peng Sun and Shinji Watanabe and Yangyang Shi and Yumeng Tao and Robin Scheibler and Samuele Cornell and Sean Kim and Stavros Petridis},
103
- year={2023},
104
- eprint={2310.17864},
105
- archivePrefix={arXiv},
106
- primaryClass={eess.AS}
107
- }
108
- ```
109
-
110
- Disclaimer on Datasets
111
- ----------------------
112
-
113
- This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license.
114
-
115
- If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community!
116
-
117
- Pre-trained Model License
118
- -------------------------
119
-
120
- The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the dataset used for training. It is your responsibility to determine whether you have permission to use the models for your use case.
121
-
122
- For instance, SquimSubjective model is released under the Creative Commons Attribution Non Commercial 4.0 International (CC-BY-NC 4.0) license. See [the link](https://zenodo.org/record/4660670#.ZBtWPOxuerN) for additional details.
123
-
124
- Other pre-trained models that have different license are noted in documentation. Please checkout the [documentation page](https://pytorch.org/audio/main/).
1
+ Metadata-Version: 2.4
2
+ Name: torchaudio
3
+ Version: 2.8.0
4
+ Summary: An audio package for PyTorch
5
+ Home-page: https://github.com/pytorch/audio
6
+ Author: Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough, Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
7
+ Author-email: soumith@pytorch.org
8
+ Maintainer: Moto Hira, Caroline Chen, Jeff Hwang, Zhaoheng Ni, Xiaohui Zhang
9
+ Maintainer-email: moto@meta.com
10
+ Classifier: Environment :: Plugins
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Science/Research
13
+ Classifier: License :: OSI Approved :: BSD License
14
+ Classifier: Operating System :: MacOS :: MacOS X
15
+ Classifier: Operating System :: Microsoft :: Windows
16
+ Classifier: Operating System :: POSIX
17
+ Classifier: Programming Language :: C++
18
+ Classifier: Programming Language :: Python :: 3.9
19
+ Classifier: Programming Language :: Python :: 3.10
20
+ Classifier: Programming Language :: Python :: 3.11
21
+ Classifier: Programming Language :: Python :: 3.12
22
+ Classifier: Programming Language :: Python :: 3.13
23
+ Classifier: Programming Language :: Python :: Implementation :: CPython
24
+ Classifier: Topic :: Multimedia :: Sound/Audio
25
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
26
+ Description-Content-Type: text/markdown
27
+ License-File: LICENSE
28
+ Requires-Dist: torch==2.8.0
29
+ Dynamic: author
30
+ Dynamic: author-email
31
+ Dynamic: classifier
32
+ Dynamic: description
33
+ Dynamic: description-content-type
34
+ Dynamic: home-page
35
+ Dynamic: license-file
36
+ Dynamic: maintainer
37
+ Dynamic: maintainer-email
38
+ Dynamic: requires-dist
39
+ Dynamic: summary
40
+
41
+ torchaudio: an audio library for PyTorch
42
+ ========================================
43
+
44
+ [![Documentation](https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchaudio%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v)](https://pytorch.org/audio/main/)
45
+ [![Anaconda Badge](https://anaconda.org/pytorch/torchaudio/badges/downloads.svg)](https://anaconda.org/pytorch/torchaudio)
46
+ [![Anaconda-Server Badge](https://anaconda.org/pytorch/torchaudio/badges/platforms.svg)](https://anaconda.org/pytorch/torchaudio)
47
+
48
+ ![TorchAudio Logo](docs/source/_static/img/logo.png)
49
+
50
+ > [!NOTE]
51
+ > **We are in the process of refactoring TorchAudio and transitioning it into a
52
+ > maintenance phase. This process will include removing some user-facing
53
+ > features: those features are deprecated from TorchAudio 2.8 and will be removed in 2.9.
54
+ > Our main goals are to reduce redundancies with the rest of the
55
+ > PyTorch ecosystem, make it easier to maintain, and create a version of
56
+ > TorchAudio that is more tightly scoped to its strengths: processing audio
57
+ > data for ML. Please see
58
+ > [our community message](https://github.com/pytorch/audio/issues/3902)
59
+ > for more details.**
60
+
61
+ The aim of torchaudio is to apply [PyTorch](https://github.com/pytorch/pytorch) to
62
+ the audio domain. By supporting PyTorch, torchaudio follows the same philosophy
63
+ of providing strong GPU acceleration, having a focus on trainable features through
64
+ the autograd system, and having consistent style (tensor names and dimension names).
65
+ Therefore, it is primarily a machine learning library and not a general signal
66
+ processing library. The benefits of PyTorch can be seen in torchaudio through
67
+ having all the computations be through PyTorch operations which makes it easy
68
+ to use and feel like a natural extension.
69
+
70
+ - [Support audio I/O (Load files, Save files)](http://pytorch.org/audio/main/)
71
+ - Load a variety of audio formats, such as `wav`, `mp3`, `ogg`, `flac`, `opus`, `sphere`, into a torch Tensor using SoX
72
+ - [Kaldi (ark/scp)](http://pytorch.org/audio/main/kaldi_io.html)
73
+ - [Dataloaders for common audio datasets](http://pytorch.org/audio/main/datasets.html)
74
+ - Audio and speech processing functions
75
+ - [forced_align](https://pytorch.org/audio/main/generated/torchaudio.functional.forced_align.html)
76
+ - Common audio transforms
77
+ - [Spectrogram, AmplitudeToDB, MelScale, MelSpectrogram, MFCC, MuLawEncoding, MuLawDecoding, Resample](http://pytorch.org/audio/main/transforms.html)
78
+ - Compliance interfaces: Run code using PyTorch that align with other libraries
79
+ - [Kaldi: spectrogram, fbank, mfcc](https://pytorch.org/audio/main/compliance.kaldi.html)
80
+
81
+ Installation
82
+ ------------
83
+
84
+ Please refer to https://pytorch.org/audio/main/installation.html for installation and build process of TorchAudio.
85
+
86
+
87
+ API Reference
88
+ -------------
89
+
90
+ API Reference is located here: http://pytorch.org/audio/main/
91
+
92
+ Contributing Guidelines
93
+ -----------------------
94
+
95
+ Please refer to [CONTRIBUTING.md](./CONTRIBUTING.md)
96
+
97
+ Citation
98
+ --------
99
+
100
+ If you find this package useful, please cite as:
101
+
102
+ ```bibtex
103
+ @article{yang2021torchaudio,
104
+ title={TorchAudio: Building Blocks for Audio and Speech Processing},
105
+ author={Yao-Yuan Yang and Moto Hira and Zhaoheng Ni and Anjali Chourdia and Artyom Astafurov and Caroline Chen and Ching-Feng Yeh and Christian Puhrsch and David Pollack and Dmitriy Genzel and Donny Greenberg and Edward Z. Yang and Jason Lian and Jay Mahadeokar and Jeff Hwang and Ji Chen and Peter Goldsborough and Prabhat Roy and Sean Narenthiran and Shinji Watanabe and Soumith Chintala and Vincent Quenneville-Bélair and Yangyang Shi},
106
+ journal={arXiv preprint arXiv:2110.15018},
107
+ year={2021}
108
+ }
109
+ ```
110
+
111
+ ```bibtex
112
+ @misc{hwang2023torchaudio,
113
+ title={TorchAudio 2.1: Advancing speech recognition, self-supervised learning, and audio processing components for PyTorch},
114
+ author={Jeff Hwang and Moto Hira and Caroline Chen and Xiaohui Zhang and Zhaoheng Ni and Guangzhi Sun and Pingchuan Ma and Ruizhe Huang and Vineel Pratap and Yuekai Zhang and Anurag Kumar and Chin-Yun Yu and Chuang Zhu and Chunxi Liu and Jacob Kahn and Mirco Ravanelli and Peng Sun and Shinji Watanabe and Yangyang Shi and Yumeng Tao and Robin Scheibler and Samuele Cornell and Sean Kim and Stavros Petridis},
115
+ year={2023},
116
+ eprint={2310.17864},
117
+ archivePrefix={arXiv},
118
+ primaryClass={eess.AS}
119
+ }
120
+ ```
121
+
122
+ Disclaimer on Datasets
123
+ ----------------------
124
+
125
+ This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license.
126
+
127
+ If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community!
128
+
129
+ Pre-trained Model License
130
+ -------------------------
131
+
132
+ The pre-trained models provided in this library may have their own licenses or terms and conditions derived from the dataset used for training. It is your responsibility to determine whether you have permission to use the models for your use case.
133
+
134
+ For instance, SquimSubjective model is released under the Creative Commons Attribution Non Commercial 4.0 International (CC-BY-NC 4.0) license. See [the link](https://zenodo.org/record/4660670#.ZBtWPOxuerN) for additional details.
135
+
136
+ Other pre-trained models that have different license are noted in documentation. Please checkout the [documentation page](https://pytorch.org/audio/main/).