neutts 0.1.1.dev1__tar.gz → 0.1.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: neutts
3
- Version: 0.1.1.dev1
3
+ Version: 0.1.2
4
4
  Summary: NeuTTS - a package for text-to-speech generation using Neuphonics TTS models.
5
5
  Author-email: neuphonic <general@neuphonic.com>
6
6
  Requires-Python: >=3.9
@@ -138,13 +138,19 @@ We include benchmarks on four devices: Galaxy A25 5G, AMD Ryzen 9HX 370, iMac M4
138
138
  ```
139
139
 
140
140
  3. **Install NeuTTS**
141
- ```pip install neutts```
142
-
141
+ ```bash
142
+ pip install neutts
143
+ ```
144
+ Alternatively,
145
+ ```bash
146
+ pip install "neutts[all]" # to get the ONNX and llama-cpp dependencies
147
+ ```
148
+
143
149
 
144
150
  4. **(Optional) Install Llama-cpp-python to use the `GGUF` models.**
145
151
 
146
152
  ```bash
147
- pip install llama-cpp-python
153
+ pip install "neutts[llama]"
148
154
  ```
149
155
 
150
156
  To run llama-cpp with GPU (CUDA, MPS) support please refer to:
@@ -153,7 +159,7 @@ We include benchmarks on four devices: Galaxy A25 5G, AMD Ryzen 9HX 370, iMac M4
153
159
  5. **(Optional) Install onnxruntime to use the `.onnx` decoder.**
154
160
  If you want to run the ONNX decoder
155
161
  ```bash
156
- pip install onnxruntime
162
+ pip install "neutts[onnx]"
157
163
  ```
158
164
 
159
165
  ## Running the Model
@@ -178,7 +184,7 @@ from neutts import NeuTTS
178
184
  import soundfile as sf
179
185
 
180
186
  tts = NeuTTS(
181
- backbone_repo="neuphonic/neutts-nano", # or 'neutts-nano-q4-gguf' with llama-cpp-python installed
187
+ backbone_repo="neuphonic/neutts-nano", # or 'neuphonic/neutts-nano-q4-gguf' with llama-cpp-python installed
182
188
  backbone_device="cpu",
183
189
  codec_repo="neuphonic/neucodec",
184
190
  codec_device="cpu"
@@ -113,13 +113,19 @@ We include benchmarks on four devices: Galaxy A25 5G, AMD Ryzen 9HX 370, iMac M4
113
113
  ```
114
114
 
115
115
  3. **Install NeuTTS**
116
- ```pip install neutts```
117
-
116
+ ```bash
117
+ pip install neutts
118
+ ```
119
+ Alternatively,
120
+ ```bash
121
+ pip install "neutts[all]" # to get the ONNX and llama-cpp dependencies
122
+ ```
123
+
118
124
 
119
125
  4. **(Optional) Install Llama-cpp-python to use the `GGUF` models.**
120
126
 
121
127
  ```bash
122
- pip install llama-cpp-python
128
+ pip install "neutts[llama]"
123
129
  ```
124
130
 
125
131
  To run llama-cpp with GPU (CUDA, MPS) support please refer to:
@@ -128,7 +134,7 @@ We include benchmarks on four devices: Galaxy A25 5G, AMD Ryzen 9HX 370, iMac M4
128
134
  5. **(Optional) Install onnxruntime to use the `.onnx` decoder.**
129
135
  If you want to run the ONNX decoder
130
136
  ```bash
131
- pip install onnxruntime
137
+ pip install "neutts[onnx]"
132
138
  ```
133
139
 
134
140
  ## Running the Model
@@ -153,7 +159,7 @@ from neutts import NeuTTS
153
159
  import soundfile as sf
154
160
 
155
161
  tts = NeuTTS(
156
- backbone_repo="neuphonic/neutts-nano", # or 'neutts-nano-q4-gguf' with llama-cpp-python installed
162
+ backbone_repo="neuphonic/neutts-nano", # or 'neuphonic/neutts-nano-q4-gguf' with llama-cpp-python installed
157
163
  backbone_device="cpu",
158
164
  codec_repo="neuphonic/neucodec",
159
165
  codec_device="cpu"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: neutts
3
- Version: 0.1.1.dev1
3
+ Version: 0.1.2
4
4
  Summary: NeuTTS - a package for text-to-speech generation using Neuphonics TTS models.
5
5
  Author-email: neuphonic <general@neuphonic.com>
6
6
  Requires-Python: >=3.9
@@ -138,13 +138,19 @@ We include benchmarks on four devices: Galaxy A25 5G, AMD Ryzen 9HX 370, iMac M4
138
138
  ```
139
139
 
140
140
  3. **Install NeuTTS**
141
- ```pip install neutts```
142
-
141
+ ```bash
142
+ pip install neutts
143
+ ```
144
+ Alternatively,
145
+ ```bash
146
+ pip install "neutts[all]" # to get the ONNX and llama-cpp dependencies
147
+ ```
148
+
143
149
 
144
150
  4. **(Optional) Install Llama-cpp-python to use the `GGUF` models.**
145
151
 
146
152
  ```bash
147
- pip install llama-cpp-python
153
+ pip install "neutts[llama]"
148
154
  ```
149
155
 
150
156
  To run llama-cpp with GPU (CUDA, MPS) support please refer to:
@@ -153,7 +159,7 @@ We include benchmarks on four devices: Galaxy A25 5G, AMD Ryzen 9HX 370, iMac M4
153
159
  5. **(Optional) Install onnxruntime to use the `.onnx` decoder.**
154
160
  If you want to run the ONNX decoder
155
161
  ```bash
156
- pip install onnxruntime
162
+ pip install "neutts[onnx]"
157
163
  ```
158
164
 
159
165
  ## Running the Model
@@ -178,7 +184,7 @@ from neutts import NeuTTS
178
184
  import soundfile as sf
179
185
 
180
186
  tts = NeuTTS(
181
- backbone_repo="neuphonic/neutts-nano", # or 'neutts-nano-q4-gguf' with llama-cpp-python installed
187
+ backbone_repo="neuphonic/neutts-nano", # or 'neuphonic/neutts-nano-q4-gguf' with llama-cpp-python installed
182
188
  backbone_device="cpu",
183
189
  codec_repo="neuphonic/neucodec",
184
190
  codec_device="cpu"
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "neutts"
7
- version = "0.1.1.dev1"
7
+ version = "0.1.2"
8
8
  authors = [
9
9
  { name="neuphonic", email="general@neuphonic.com" },
10
10
  ]
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes