cartesia 1.0.4.tar.gz → 1.0.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cartesia-1.0.5/LICENSE.md +21 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/PKG-INFO +92 -13
- {cartesia-1.0.4 → cartesia-1.0.5}/README.md +90 -12
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia/client.py +192 -4
- cartesia-1.0.5/cartesia/version.py +1 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia.egg-info/PKG-INFO +92 -13
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia.egg-info/SOURCES.txt +1 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia.egg-info/requires.txt +1 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/tests/test_tts.py +40 -8
- cartesia-1.0.4/cartesia/version.py +0 -1
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia/__init__.py +0 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia/_types.py +0 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia/utils/__init__.py +0 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia/utils/deprecated.py +0 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia/utils/retry.py +0 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia.egg-info/dependency_links.txt +0 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/cartesia.egg-info/top_level.txt +0 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/pyproject.toml +0 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/setup.cfg +0 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/setup.py +0 -0
- {cartesia-1.0.4 → cartesia-1.0.5}/tests/test_deprecated.py +0 -0
--- /dev/null
+++ cartesia-1.0.5/LICENSE.md
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Cartesia AI, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
--- cartesia-1.0.4/PKG-INFO
+++ cartesia-1.0.5/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: cartesia
-Version: 1.0.4
+Version: 1.0.5
 Summary: The official Python library for the Cartesia API.
 Home-page: 
 Author: Cartesia, Inc.
@@ -12,6 +12,7 @@ Requires-Python: >=3.8.0
 Description-Content-Type: text/markdown
 Provides-Extra: dev
 Provides-Extra: all
+License-File: LICENSE.md
 
 
 # Cartesia Python API Library
@@ -80,10 +81,10 @@ voice = client.voices.get(id=voice_id)
 
 transcript = "Hello! Welcome to Cartesia"
 
-# You can check out our models at
+# You can check out our models at https://docs.cartesia.ai/getting-started/available-models
 model_id = "sonic-english"
 
-# You can find the supported `output_format`s
+# You can find the supported `output_format`s at https://docs.cartesia.ai/api-reference/endpoints/stream-speech-server-sent-events
 output_format = {
     "container": "raw",
     "encoding": "pcm_f32le",
@@ -131,10 +132,10 @@ async def write_stream():
     voice_id = "a0e99841-438c-4a64-b679-ae501e7d6091"
     voice = client.voices.get(id=voice_id)
     transcript = "Hello! Welcome to Cartesia"
-    # You can check out our models at
+    # You can check out our models at https://docs.cartesia.ai/getting-started/available-models
     model_id = "sonic-english"
 
-    # You can find the supported `output_format`s
+    # You can find the supported `output_format`s at https://docs.cartesia.ai/api-reference/endpoints/stream-speech-server-sent-events
     output_format = {
         "container": "raw",
         "encoding": "pcm_f32le",
@@ -186,10 +187,10 @@ voice_id = "a0e99841-438c-4a64-b679-ae501e7d6091"
 voice = client.voices.get(id=voice_id)
 transcript = "Hello! Welcome to Cartesia"
 
-# You can check out our models at
+# You can check out our models at https://docs.cartesia.ai/getting-started/available-models
 model_id = "sonic-english"
 
-# You can find the supported `output_format`s
+# You can find the supported `output_format`s at https://docs.cartesia.ai/api-reference/endpoints/stream-speech-server-sent-events
 output_format = {
     "container": "raw",
     "encoding": "pcm_f32le",
@@ -233,7 +234,7 @@ In some cases, input text may need to be streamed in. In these cases, it would b
 
 To mitigate this, Cartesia offers audio continuations. In this setting, users can send input text, as it becomes available, over a websocket connection.
 
-To do this, we will create a `context` and
+To do this, we will create a `context` and send multiple requests without awaiting the response. Then you can listen to the responses in the order they were sent.
 
 Each `context` will be closed automatically after 5 seconds of inactivity or when the `no_more_inputs` method is called. `no_more_inputs` sends a request with the `continue_=False`, which indicates no more inputs will be sent over this context
 
@@ -244,13 +245,13 @@ import pyaudio
 from cartesia import AsyncCartesia
 
 async def send_transcripts(ctx):
-    # Check out voice IDs by calling `client.voices.list()` or on
+    # Check out voice IDs by calling `client.voices.list()` or on https://play.cartesia.ai/
     voice_id = "87748186-23bb-4158-a1eb-332911b0b708"
 
-    # You can check out our models at
+    # You can check out our models at https://docs.cartesia.ai/getting-started/available-models
     model_id = "sonic-english"
 
-    # You can find the supported `output_format`s
+    # You can find the supported `output_format`s at https://docs.cartesia.ai/api-reference/endpoints/stream-speech-server-sent-events
     output_format = {
         "container": "raw",
         "encoding": "pcm_f32le",
@@ -322,6 +323,84 @@ async def stream_and_listen():
 asyncio.run(stream_and_listen())
 ```
 
+You can also use continuations on the synchronous Cartesia client to stream in text as it becomes available. To do this, pass in a text generator that produces text chunks at intervals of less than 1 second, as shown below. This ensures smooth audio playback.
+
+Note: the sync client has a different API for continuations compared to the async client.
+
+```python
+from cartesia import Cartesia
+import pyaudio
+import os
+
+client = Cartesia(api_key=os.environ.get("CARTESIA_API_KEY"))
+
+transcripts = [
+    "The crew engaged in a range of activities designed to mirror those "
+    "they might perform on a real Mars mission. ",
+    "Aside from growing vegetables and maintaining their habitat, they faced "
+    "additional stressors like communication delays with Earth, ",
+    "up to twenty-two minutes each way, to simulate the distance from Mars to our planet. ",
+    "These exercises were critical for understanding how astronauts can "
+    "maintain not just physical health but also mental well-being under such challenging conditions. ",
+]
+
+# Ending each transcript with a space makes the audio smoother
+def chunk_generator(transcripts):
+    for transcript in transcripts:
+        if transcript.endswith(" "):
+            yield transcript
+        else:
+            yield transcript + " "
+
+
+# You can check out voice IDs by calling `client.voices.list()` or on https://play.cartesia.ai/
+voice_id = "87748186-23bb-4158-a1eb-332911b0b708"
+
+# You can check out our models at https://docs.cartesia.ai/getting-started/available-models
+model_id = "sonic-english"
+
+# You can find the supported `output_format`s at https://docs.cartesia.ai/api-reference/endpoints/stream-speech-server-sent-events
+output_format = {
+    "container": "raw",
+    "encoding": "pcm_f32le",
+    "sample_rate": 44100,
+}
+
+p = pyaudio.PyAudio()
+rate = 44100
+
+stream = None
+
+# Set up the websocket connection
+ws = client.tts.websocket()
+
+# Create a context to send and receive audio
+ctx = ws.context()  # Generates a random context ID if not provided
+
+# Pass in a text generator to generate & stream the audio
+output_stream = ctx.send(
+    model_id=model_id,
+    transcript=chunk_generator(transcripts),
+    voice_id=voice_id,
+    output_format=output_format,
+)
+
+for output in output_stream:
+    buffer = output["audio"]
+
+    if not stream:
+        stream = p.open(format=pyaudio.paFloat32, channels=1, rate=rate, output=True)
+
+    # Write the audio data to the stream
+    stream.write(buffer)
+
+stream.stop_stream()
+stream.close()
+p.terminate()
+
+ws.close()  # Close the websocket connection
+```
+
 ### Multilingual Text-to-Speech [Alpha]
 
 You can use our `sonic-multilingual` model to generate audio in multiple languages. The languages supported are available at [docs.cartesia.ai](https://docs.cartesia.ai/getting-started/available-models).
@@ -339,10 +418,10 @@ voice = client.voices.get(id=voice_id)
 transcript = "Hola! Bienvenido a Cartesia"
 language = "es"  # Language code corresponding to the language of the transcript
 
-# Make sure you use the multilingual model! You can check out all models at
+# Make sure you use the multilingual model! You can check out all models at https://docs.cartesia.ai/getting-started/available-models
 model_id = "sonic-multilingual"
 
-# You can find the supported `output_format`s
+# You can find the supported `output_format`s at https://docs.cartesia.ai/api-reference/endpoints/stream-speech-server-sent-events
 output_format = {
     "container": "raw",
     "encoding": "pcm_f32le",
--- cartesia-1.0.4/README.md
+++ cartesia-1.0.5/README.md

The README.md hunks (at old lines 64, 115, 170, 217, 228, 306, and 323) carry exactly the same content changes as the README portion of the PKG-INFO diff above, offset only by the package-metadata header lines.
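For context on the sync-continuations requirement above (text chunks arriving less than 1 second apart), here is a minimal sketch of pacing chunks from an arbitrary incremental producer; `token_source` and its timings are hypothetical stand-ins, not part of the package:

```python
import time

# Hypothetical stand-in for any incremental text source (e.g. an LLM stream).
def token_source():
    for piece in ["Hello, ", "this text ", "arrives ", "incrementally. "]:
        time.sleep(0.2)  # well under the 1-second budget between chunks
        yield piece

# ctx.send(..., transcript=token_source(), ...) can consume this generator
# directly; sends are interleaved with audio receives as chunks become available.
```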
--- cartesia-1.0.4/cartesia/client.py
+++ cartesia-1.0.5/cartesia/client.py
@@ -7,6 +7,7 @@ from types import TracebackType
 from typing import (
     Any,
     AsyncGenerator,
+    Iterator,
     Dict,
     Generator,
     List,
@@ -14,6 +15,7 @@ from typing import (
     Tuple,
     Union,
     Callable,
+    Set,
 )
 
 import aiohttp
@@ -21,6 +23,7 @@ import httpx
 import logging
 import requests
 from websockets.sync.client import connect
+from iterators import TimeoutIterator
 
 from cartesia.utils.retry import retry_on_connection_error, retry_on_connection_error_async
 from cartesia._types import (
@@ -260,6 +263,165 @@ class Voices(Resource):
         return response.json()
 
 
+class _TTSContext:
+    """Manage a single context over a WebSocket.
+
+    This class can be used to stream inputs, as they become available, to a specific `context_id`. See README for usage.
+
+    See :class:`_AsyncTTSContext` for asynchronous use cases.
+
+    Each TTSContext will close automatically when a done message is received for that context. It also closes if there is an error.
+    """
+
+    def __init__(self, context_id: str, websocket: "_WebSocket"):
+        self._context_id = context_id
+        self._websocket = websocket
+        self._error = None
+
+    def __del__(self):
+        self._close()
+
+    @property
+    def context_id(self) -> str:
+        return self._context_id
+
+    def send(
+        self,
+        model_id: str,
+        transcript: Iterator[str],
+        output_format: OutputFormat,
+        voice_id: Optional[str] = None,
+        voice_embedding: Optional[List[float]] = None,
+        context_id: Optional[str] = None,
+        duration: Optional[int] = None,
+        language: Optional[str] = None,
+    ) -> Generator[bytes, None, None]:
+        """Send audio generation requests to the WebSocket and yield responses.
+
+        Args:
+            model_id: The ID of the model to use for generating audio.
+            transcript: Iterator over text chunks with <1s latency.
+            output_format: A dictionary containing the details of the output format.
+            voice_id: The ID of the voice to use for generating audio.
+            voice_embedding: The embedding of the voice to use for generating audio.
+            context_id: The context ID to use for the request. If not specified, a random context ID will be generated.
+            duration: The duration of the audio in seconds.
+            language: The language code for the audio request. This can only be used with `model_id = sonic-multilingual`
+
+        Yields:
+            Dictionary containing the following key(s):
+                - audio: The audio as bytes.
+                - context_id: The context ID for the request.
+
+        Raises:
+            ValueError: If provided context_id doesn't match the current context.
+            RuntimeError: If there's an error generating audio.
+        """
+        if context_id is not None and context_id != self._context_id:
+            raise ValueError("Context ID does not match the context ID of the current context.")
+
+        self._websocket.connect()
+
+        voice = self._websocket._validate_and_construct_voice(voice_id, voice_embedding)
+
+        # Create the initial request body
+        request_body = {
+            "model_id": model_id,
+            "voice": voice,
+            "output_format": {
+                "container": output_format["container"],
+                "encoding": output_format["encoding"],
+                "sample_rate": output_format["sample_rate"],
+            },
+            "context_id": self._context_id,
+            "language": language,
+        }
+
+        if duration is not None:
+            request_body["duration"] = duration
+
+        try:
+            # Create an iterator with a timeout to get text chunks
+            text_iterator = TimeoutIterator(
+                transcript, timeout=0.001
+            )  # 1ms timeout for nearly non-blocking receive
+            next_chunk = next(text_iterator, None)
+
+            while True:
+                # Send the next text chunk to the WebSocket if available
+                if next_chunk is not None and next_chunk != text_iterator.get_sentinel():
+                    request_body["transcript"] = next_chunk
+                    request_body["continue"] = True
+                    self._websocket.websocket.send(json.dumps(request_body))
+                    next_chunk = next(text_iterator, None)
+
+                try:
+                    # Receive responses from the WebSocket with a small timeout
+                    response = json.loads(
+                        self._websocket.websocket.recv(timeout=0.001)
+                    )  # 1ms timeout for nearly non-blocking receive
+                    if response["context_id"] != self._context_id:
+                        pass
+                    if "error" in response:
+                        raise RuntimeError(f"Error generating audio:\n{response['error']}")
+                    if response["done"]:
+                        break
+                    if response["data"]:
+                        yield self._websocket._convert_response(
+                            response=response, include_context_id=True
+                        )
+                except TimeoutError:
+                    pass
+
+                # Continuously receive from WebSocket until the next text chunk is available
+                while next_chunk == text_iterator.get_sentinel():
+                    try:
+                        response = json.loads(self._websocket.websocket.recv(timeout=0.001))
+                        if response["context_id"] != self._context_id:
+                            continue
+                        if "error" in response:
+                            raise RuntimeError(f"Error generating audio:\n{response['error']}")
+                        if response["done"]:
+                            break
+                        if response["data"]:
+                            yield self._websocket._convert_response(
+                                response=response, include_context_id=True
+                            )
+                    except TimeoutError:
+                        pass
+                    next_chunk = next(text_iterator, None)
+
+                # Send final message if all input text chunks are exhausted
+                if next_chunk is None:
+                    request_body["transcript"] = ""
+                    request_body["continue"] = False
+                    self._websocket.websocket.send(json.dumps(request_body))
+                    break
+
+            # Receive remaining messages from the WebSocket until "done" is received
+            while True:
+                response = json.loads(self._websocket.websocket.recv())
+                if response["context_id"] != self._context_id:
+                    continue
+                if "error" in response:
+                    raise RuntimeError(f"Error generating audio:\n{response['error']}")
+                if response["done"]:
+                    break
+                yield self._websocket._convert_response(response=response, include_context_id=True)
+
+        except Exception as e:
+            self._websocket.close()
+            raise RuntimeError(f"Failed to generate audio. {e}")
+
+    def _close(self):
+        """Closes the context. Automatically called when a done message is received for this context."""
+        self._websocket._remove_context(self._context_id)
+
+    def is_closed(self):
+        """Check if the context is closed or not. Returns True if closed."""
+        return self._context_id not in self._websocket._contexts
+
+
 class _WebSocket:
     """This class contains methods to generate audio using WebSocket. Ideal for low-latency audio generation.
 
@@ -283,6 +445,13 @@ class _WebSocket:
         self.api_key = api_key
         self.cartesia_version = cartesia_version
         self.websocket = None
+        self._contexts: Set[str] = set()
+
+    def __del__(self):
+        try:
+            self.close()
+        except Exception as e:
+            raise RuntimeError("Failed to close WebSocket: ", e)
 
     def connect(self):
         """This method connects to the WebSocket if it is not already connected.
@@ -304,9 +473,12 @@
 
     def close(self):
         """This method closes the WebSocket connection. *Highly* recommended to call this method when done using the WebSocket."""
-        if self.websocket:
+        if self.websocket and not self._is_websocket_closed():
             self.websocket.close()
 
+        if self._contexts:
+            self._contexts.clear()
+
     def _convert_response(
         self, response: Dict[str, any], include_context_id: bool
     ) -> Dict[str, Any]:
@@ -426,10 +598,22 @@
             yield self._convert_response(response=response, include_context_id=True)
         except Exception as e:
             # Close the websocket connection if an error occurs.
-
-            self.websocket.close()
+            self.close()
             raise RuntimeError(f"Failed to generate audio. {response}") from e
 
+    def _remove_context(self, context_id: str):
+        if context_id in self._contexts:
+            self._contexts.remove(context_id)
+
+    def context(self, context_id: Optional[str] = None) -> _TTSContext:
+        if context_id in self._contexts:
+            raise ValueError(f"Context for context ID {context_id} already exists.")
+        if context_id is None:
+            context_id = str(uuid.uuid4())
+        if context_id not in self._contexts:
+            self._contexts.add(context_id)
+        return _TTSContext(context_id, self)
+
 
 class _SSE:
     """This class contains methods to generate audio using Server-Sent Events.
@@ -826,7 +1010,7 @@ class _AsyncSSE(_SSE):
 
 
 class _AsyncTTSContext:
-    """Manage a single context over
+    """Manage a single context over an AsyncWebSocket.
 
     This class separates sending requests and receiving responses into two separate methods.
     This can be used for sending multiple requests without awaiting the response.
@@ -945,6 +1129,10 @@ class _AsyncTTSContext:
         """Closes the context. Automatically called when a done message is received for this context."""
         self._websocket._remove_context(self._context_id)
 
+    def is_closed(self):
+        """Check if the context is closed or not. Returns True if closed."""
+        return self._context_id not in self._websocket._context_queues
+
     async def __aenter__(self):
         return self
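Taken together, the new synchronous surface looks roughly like this; a minimal sketch, assuming `CARTESIA_API_KEY` is set and reusing the voice and model IDs from the README examples:

```python
import os

from cartesia import Cartesia

client = Cartesia(api_key=os.environ.get("CARTESIA_API_KEY"))
ws = client.tts.websocket()

# Passing an ID that is already registered raises ValueError.
ctx = ws.context("example-context")

audio = b""
for out in ctx.send(
    model_id="sonic-english",
    transcript=iter(["Hello! ", "Welcome to Cartesia. "]),
    voice_id="a0e99841-438c-4a64-b679-ae501e7d6091",
    output_format={"container": "raw", "encoding": "pcm_f32le", "sample_rate": 44100},
):
    audio += out["audio"]

print(ctx.is_closed())  # True once the context has been deregistered
ws.close()  # also clears any remaining contexts
```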
--- /dev/null
+++ cartesia-1.0.5/cartesia/version.py
@@ -0,0 +1 @@
+__version__ = "1.0.5"
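With the version now in its own module, it can presumably be introspected at runtime:

```python
from cartesia.version import __version__

assert __version__ == "1.0.5"
```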
--- cartesia-1.0.4/cartesia.egg-info/PKG-INFO
+++ cartesia-1.0.5/cartesia.egg-info/PKG-INFO

Identical hunks to the top-level PKG-INFO diff above (version bump, new License-File entry, and the embedded README updates).
--- cartesia-1.0.4/tests/test_tts.py
+++ cartesia-1.0.5/tests/test_tts.py
@@ -354,9 +354,41 @@ def test_websocket_send_multilingual(resources: _Resources, stream: bool, langua
 
     ws.close()
 
+
+def chunk_generator(transcripts):
+    for transcript in transcripts:
+        if transcript.endswith(" "):
+            yield transcript
+        else:
+            yield transcript + " "
+
+
+def test_sync_continuation_websocket_context_send():
+    logger.info("Testing sync continuation WebSocket context send")
+    client = create_client()
+    ws = client.tts.websocket()
+    context_id = str(uuid.uuid4())
+    try:
+        ctx = ws.context(context_id)
+        transcripts = ["Hello, world!", "I'm generating audio on Cartesia."]
+        output_generate = ctx.send(
+            model_id=DEFAULT_MODEL_ID,
+            transcript=chunk_generator(transcripts),
+            voice_id=SAMPLE_VOICE_ID,
+            output_format={
+                "container": "raw",
+                "encoding": "pcm_f32le",
+                "sample_rate": 44100
+            },
+        )
+        for out in output_generate:
+            assert out.keys() == {"audio", "context_id"}
+            assert isinstance(out["audio"], bytes)
+    finally:
+        ws.close()
+
 
 @pytest.mark.asyncio
 async def test_continuation_websocket_context_send():
-    logger.info("Testing continuation WebSocket context send")
+    logger.info("Testing async continuation WebSocket context send")
     async_client = create_async_client()
     ws = await async_client.tts.websocket()
     context_id = str(uuid.uuid4())
@@ -387,7 +419,7 @@ async def test_continuation_websocket_context_send():
 
 @pytest.mark.asyncio
 async def test_continuation_websocket_context_send_incorrect_transcript():
-    logger.info("Testing continuation WebSocket context send with incorrect transcript")
+    logger.info("Testing async continuation WebSocket context send with incorrect transcript")
     transcript = "Hello, world! I'm generating audio on Cartesia."
     async_client = create_async_client()
     ws = await async_client.tts.websocket()
@@ -421,7 +453,7 @@ async def test_continuation_websocket_context_send_incorrect_transcript():
 
 @pytest.mark.asyncio
 async def test_continuation_websocket_context_send_incorrect_voice_id():
-    logger.info("Testing continuation WebSocket context send with incorrect voice_id")
+    logger.info("Testing async continuation WebSocket context send with incorrect voice_id")
     async_client = create_async_client()
     ws = await async_client.tts.websocket()
     context_id = str(uuid.uuid4())
@@ -454,7 +486,7 @@ async def test_continuation_websocket_context_send_incorrect_voice_id():
 
 @pytest.mark.asyncio
 async def test_continuation_websocket_context_send_incorrect_output_format():
-    logger.info("Testing continuation WebSocket context send with incorrect output_format")
+    logger.info("Testing async continuation WebSocket context send with incorrect output_format")
     async_client = create_async_client()
     ws = await async_client.tts.websocket()
     context_id = str(uuid.uuid4())
@@ -487,7 +519,7 @@ async def test_continuation_websocket_context_send_incorrect_output_format():
 
 @pytest.mark.asyncio
 async def test_continuation_websocket_context_send_incorrect_model_id():
-    logger.info("Testing continuation WebSocket context send with incorrect model_id")
+    logger.info("Testing async continuation WebSocket context send with incorrect model_id")
     async_client = create_async_client()
     ws = await async_client.tts.websocket()
     try:
@@ -516,7 +548,7 @@ async def test_continuation_websocket_context_send_incorrect_model_id():
 
 @pytest.mark.asyncio
 async def test_continuation_websocket_context_send_incorrect_context_id():
-    logger.info("Testing continuation WebSocket context send with incorrect context_id")
+    logger.info("Testing async continuation WebSocket context send with incorrect context_id")
     async_client = create_async_client()
     ws = await async_client.tts.websocket()
     try:
@@ -549,7 +581,7 @@ async def test_continuation_websocket_context_send_incorrect_context_id():
 
 @pytest.mark.asyncio
 async def test_continuation_websocket_context_twice_on_same_context():
-    logger.info("Testing continuation WebSocket context twice on same context")
+    logger.info("Testing async continuation WebSocket context twice on same context")
     async_client = create_async_client()
     ws = await async_client.tts.websocket()
     context_id = str(uuid.uuid4())
@@ -622,7 +654,7 @@ async def context_runner(ws, transcripts):
 
 @pytest.mark.asyncio
 async def test_continuation_websocket_context_three_contexts_parallel():
-    logger.info("Testing continuation WebSocket context three contexts parallel")
+    logger.info("Testing async continuation WebSocket context three contexts parallel")
     async_client = create_async_client()
     ws = await async_client.tts.websocket()
     try:
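These are live-API integration tests; a hypothetical way to run just the continuation tests locally, assuming a valid `CARTESIA_API_KEY` in the environment:

```python
import pytest

# -k selects every test with "continuation" in its name, sync and async alike.
pytest.main(["tests/test_tts.py", "-k", "continuation", "-v"])
```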
--- cartesia-1.0.4/cartesia/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "1.0.4"
All remaining files listed above are unchanged between 1.0.4 and 1.0.5.