cartesia 1.0.12__py2.py3-none-any.whl → 1.0.14__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cartesia/_websocket.py ADDED
@@ -0,0 +1,374 @@
+ import base64
+ import json
+ import uuid
+ from collections import defaultdict
+ from typing import Any, Dict, Generator, Iterator, List, Optional, Set, Union
+
+ try:
+     from websockets.sync.client import connect
+
+     IS_WEBSOCKET_SYNC_AVAILABLE = True
+ except ImportError:
+     IS_WEBSOCKET_SYNC_AVAILABLE = False
+
+ from iterators import TimeoutIterator
+
+ from cartesia._types import EventType, OutputFormat, VoiceControls
+ from cartesia.utils.tts import _validate_and_construct_voice
+
+
+ class _TTSContext:
+     """Manage a single context over a WebSocket.
+
+     This class can be used to stream inputs, as they become available, to a specific `context_id`. See README for usage.
+
+     See :class:`_AsyncTTSContext` for asynchronous use cases.
+
+     Each TTSContext will close automatically when a done message is received for that context. It also closes if there is an error.
+     """
+
+     def __init__(self, context_id: str, websocket: "_WebSocket"):
+         self._context_id = context_id
+         self._websocket = websocket
+         self._error = None
+
+     def __del__(self):
+         self._close()
+
+     @property
+     def context_id(self) -> str:
+         return self._context_id
+
+     def send(
+         self,
+         model_id: str,
+         transcript: Iterator[str],
+         output_format: OutputFormat,
+         voice_id: Optional[str] = None,
+         voice_embedding: Optional[List[float]] = None,
+         context_id: Optional[str] = None,
+         duration: Optional[int] = None,
+         language: Optional[str] = None,
+         add_timestamps: bool = False,
+         _experimental_voice_controls: Optional[VoiceControls] = None,
+     ) -> Generator[bytes, None, None]:
+         """Send audio generation requests to the WebSocket and yield responses.
+
+         Args:
+             model_id: The ID of the model to use for generating audio.
+             transcript: Iterator over text chunks with <1s latency.
+             output_format: A dictionary containing the details of the output format.
+             voice_id: The ID of the voice to use for generating audio.
+             voice_embedding: The embedding of the voice to use for generating audio.
+             context_id: The context ID to use for the request. If not specified, a random context ID will be generated.
+             duration: The duration of the audio in seconds.
+             language: The language code for the audio request. This can only be used with `model_id = sonic-multilingual`
+             add_timestamps: Whether to return word-level timestamps.
+             _experimental_voice_controls: Experimental voice controls for controlling speed and emotion.
+                 Note: This is an experimental feature and may change rapidly in future releases.
+
+         Yields:
+             Dictionary containing the following key(s):
+             - audio: The audio as bytes.
+             - context_id: The context ID for the request.
+
+         Raises:
+             ValueError: If provided context_id doesn't match the current context.
+             RuntimeError: If there's an error generating audio.
+         """
+         if context_id is not None and context_id != self._context_id:
+             raise ValueError("Context ID does not match the context ID of the current context.")
+
+         self._websocket.connect()
+
+         voice = _validate_and_construct_voice(
+             voice_id,
+             voice_embedding=voice_embedding,
+             experimental_voice_controls=_experimental_voice_controls,
+         )
+
+         # Create the initial request body
+         request_body = {
+             "model_id": model_id,
+             "voice": voice,
+             "output_format": {
+                 "container": output_format["container"],
+                 "encoding": output_format["encoding"],
+                 "sample_rate": output_format["sample_rate"],
+             },
+             "context_id": self._context_id,
+             "language": language,
+             "add_timestamps": add_timestamps,
+         }
+
+         if duration is not None:
+             request_body["duration"] = duration
+
+         try:
+             # Create an iterator with a timeout to get text chunks
+             text_iterator = TimeoutIterator(
+                 transcript, timeout=0.001
+             )  # 1ms timeout for nearly non-blocking receive
+             next_chunk = next(text_iterator, None)
+
+             while True:
+                 # Send the next text chunk to the WebSocket if available
+                 if next_chunk is not None and next_chunk != text_iterator.get_sentinel():
+                     request_body["transcript"] = next_chunk
+                     request_body["continue"] = True
+                     self._websocket.websocket.send(json.dumps(request_body))
+                     next_chunk = next(text_iterator, None)
+
+                 try:
+                     # Receive responses from the WebSocket with a small timeout
+                     response = json.loads(
+                         self._websocket.websocket.recv(timeout=0.001)
+                     )  # 1ms timeout for nearly non-blocking receive
+                     if response["context_id"] != self._context_id:
+                         pass
+                     if "error" in response:
+                         raise RuntimeError(f"Error generating audio:\n{response['error']}")
+                     if response["done"]:
+                         break
+                     if response["data"]:
+                         yield self._websocket._convert_response(
+                             response=response, include_context_id=True
+                         )
+                 except TimeoutError:
+                     pass
+
+                 # Continuously receive from WebSocket until the next text chunk is available
+                 while next_chunk == text_iterator.get_sentinel():
+                     try:
+                         response = json.loads(self._websocket.websocket.recv(timeout=0.001))
+                         if response["context_id"] != self._context_id:
+                             continue
+                         if "error" in response:
+                             raise RuntimeError(f"Error generating audio:\n{response['error']}")
+                         if response["done"]:
+                             break
+                         if response["data"]:
+                             yield self._websocket._convert_response(
+                                 response=response, include_context_id=True
+                             )
+                     except TimeoutError:
+                         pass
+                     next_chunk = next(text_iterator, None)
+
+                 # Send final message if all input text chunks are exhausted
+                 if next_chunk is None:
+                     request_body["transcript"] = ""
+                     request_body["continue"] = False
+                     self._websocket.websocket.send(json.dumps(request_body))
+                     break
+
+             # Receive remaining messages from the WebSocket until "done" is received
+             while True:
+                 response = json.loads(self._websocket.websocket.recv())
+                 if response["context_id"] != self._context_id:
+                     continue
+                 if "error" in response:
+                     raise RuntimeError(f"Error generating audio:\n{response['error']}")
+                 if response["done"]:
+                     break
+                 yield self._websocket._convert_response(response=response, include_context_id=True)
+
+         except Exception as e:
+             self._websocket.close()
+             raise RuntimeError(f"Failed to generate audio. {e}")
+
+     def _close(self):
+         """Closes the context. Automatically called when a done message is received for this context."""
+         self._websocket._remove_context(self._context_id)
+
+     def is_closed(self):
+         """Check if the context is closed or not. Returns True if closed."""
+         return self._context_id not in self._websocket._contexts
+
+
+ class _WebSocket:
+     """This class contains methods to generate audio using WebSocket. Ideal for low-latency audio generation.
+
+     Usage:
+         >>> ws = client.tts.websocket()
+         >>> for audio_chunk in ws.send(
+         ...     model_id="sonic-english", transcript="Hello world!", voice_embedding=embedding,
+         ...     output_format={"container": "raw", "encoding": "pcm_f32le", "sample_rate": 44100},
+         ...     context_id=context_id, stream=True
+         ... ):
+         ...     audio = audio_chunk["audio"]
+     """
+
+     def __init__(
+         self,
+         ws_url: str,
+         api_key: str,
+         cartesia_version: str,
+     ):
+         self.ws_url = ws_url
+         self.api_key = api_key
+         self.cartesia_version = cartesia_version
+         self.websocket = None
+         self._contexts: Set[str] = set()
+
+     def __del__(self):
+         try:
+             self.close()
+         except Exception as e:
+             raise RuntimeError("Failed to close WebSocket: ", e)
+
+     def connect(self):
+         """This method connects to the WebSocket if it is not already connected.
+
+         Raises:
+             RuntimeError: If the connection to the WebSocket fails.
+         """
+         if not IS_WEBSOCKET_SYNC_AVAILABLE:
+             raise ImportError(
+                 "The synchronous WebSocket client is not available. Please ensure that you have 'websockets>=12.0' or compatible version installed."
+             )
+         if self.websocket is None or self._is_websocket_closed():
+             route = "tts/websocket"
+             try:
+                 self.websocket = connect(
+                     f"{self.ws_url}/{route}?api_key={self.api_key}&cartesia_version={self.cartesia_version}"
+                 )
+             except Exception as e:
+                 raise RuntimeError(f"Failed to connect to WebSocket. {e}")
+
+     def _is_websocket_closed(self):
+         return self.websocket.socket.fileno() == -1
+
+     def close(self):
+         """This method closes the WebSocket connection. *Highly* recommended to call this method when done using the WebSocket."""
+         if self.websocket and not self._is_websocket_closed():
+             self.websocket.close()
+
+         if self._contexts:
+             self._contexts.clear()
+
+     def _convert_response(
+         self, response: Dict[str, Any], include_context_id: bool
+     ) -> Dict[str, Any]:
+         out = {}
+         if response["type"] == EventType.AUDIO:
+             out["audio"] = base64.b64decode(response["data"])
+         elif response["type"] == EventType.TIMESTAMPS:
+             out["word_timestamps"] = response["word_timestamps"]
+
+         if include_context_id:
+             out["context_id"] = response["context_id"]
+
+         return out
+
+     def send(
+         self,
+         model_id: str,
+         transcript: str,
+         output_format: dict,
+         voice_id: Optional[str] = None,
+         voice_embedding: Optional[List[float]] = None,
+         context_id: Optional[str] = None,
+         duration: Optional[int] = None,
+         language: Optional[str] = None,
+         stream: bool = True,
+         add_timestamps: bool = False,
+         _experimental_voice_controls: Optional[VoiceControls] = None,
+     ) -> Union[bytes, Generator[bytes, None, None]]:
+         """Send a request to the WebSocket to generate audio.
+
+         Args:
+             model_id: The ID of the model to use for generating audio.
+             transcript: The text to convert to speech.
+             output_format: A dictionary containing the details of the output format.
+             voice_id: The ID of the voice to use for generating audio.
+             voice_embedding: The embedding of the voice to use for generating audio.
+             context_id: The context ID to use for the request. If not specified, a random context ID will be generated.
+             duration: The duration of the audio in seconds.
+             language: The language code for the audio request. This can only be used with `model_id = sonic-multilingual`
+             stream: Whether to stream the audio or not.
+             add_timestamps: Whether to return word-level timestamps.
+             _experimental_voice_controls: Experimental voice controls for controlling speed and emotion.
+                 Note: This is an experimental feature and may change rapidly in future releases.
+
+         Returns:
+             If `stream` is True, the method returns a generator that yields chunks. Each chunk is a dictionary.
+             If `stream` is False, the method returns a dictionary.
+             Both the generator and the dictionary contain the following key(s):
+             - audio: The audio as bytes.
+             - context_id: The context ID for the request.
+         """
+         self.connect()
+
+         if context_id is None:
+             context_id = str(uuid.uuid4())
+
+         voice = _validate_and_construct_voice(
+             voice_id,
+             voice_embedding=voice_embedding,
+             experimental_voice_controls=_experimental_voice_controls,
+         )
+
+         request_body = {
+             "model_id": model_id,
+             "transcript": transcript,
+             "voice": voice,
+             "output_format": {
+                 "container": output_format["container"],
+                 "encoding": output_format["encoding"],
+                 "sample_rate": output_format["sample_rate"],
+             },
+             "context_id": context_id,
+             "language": language,
+             "add_timestamps": add_timestamps,
+         }
+
+         if duration is not None:
+             request_body["duration"] = duration
+
+         generator = self._websocket_generator(request_body)
+
+         if stream:
+             return generator
+
+         chunks = []
+         word_timestamps = defaultdict(list)
+         for chunk in generator:
+             if "audio" in chunk:
+                 chunks.append(chunk["audio"])
+             if add_timestamps and "word_timestamps" in chunk:
+                 for k, v in chunk["word_timestamps"].items():
+                     word_timestamps[k].extend(v)
+         out = {"audio": b"".join(chunks), "context_id": context_id}
+         if add_timestamps:
+             out["word_timestamps"] = word_timestamps
+         return out
+
+     def _websocket_generator(self, request_body: Dict[str, Any]):
+         self.websocket.send(json.dumps(request_body))
+
+         try:
+             while True:
+                 response = json.loads(self.websocket.recv())
+                 if "error" in response:
+                     raise RuntimeError(f"Error generating audio:\n{response['error']}")
+                 if response["done"]:
+                     break
+                 yield self._convert_response(response=response, include_context_id=True)
+         except Exception as e:
+             # Close the websocket connection if an error occurs.
+             self.close()
+             raise RuntimeError(f"Failed to generate audio. {response}") from e
+
+     def _remove_context(self, context_id: str):
+         if context_id in self._contexts:
+             self._contexts.remove(context_id)
+
+     def context(self, context_id: Optional[str] = None) -> _TTSContext:
+         if context_id in self._contexts:
+             raise ValueError(f"Context for context ID {context_id} already exists.")
+         if context_id is None:
+             context_id = str(uuid.uuid4())
+         if context_id not in self._contexts:
+             self._contexts.add(context_id)
+         return _TTSContext(context_id, self)
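For orientation, here is a minimal sketch (not part of the package diff) of how the input-streaming context API above might be driven end to end. The `Cartesia` constructor and `client.tts.websocket()` call are taken from the class docstrings and the `AsyncCartesia` hunk below; the model name and voice ID are placeholders.

from cartesia.client import Cartesia


def stream_speech(api_key: str) -> bytes:
    # _WebSocket and _TTSContext are obtained indirectly via the public TTS client.
    client = Cartesia(api_key=api_key)
    ws = client.tts.websocket()          # per the _WebSocket usage docstring above
    ctx = ws.context()                   # a random context_id is generated when none is given

    def transcript():
        # Text chunks can arrive incrementally, e.g. from an upstream generator.
        yield "Hello, "
        yield "world!"

    audio = bytearray()
    for out in ctx.send(
        model_id="sonic-english",        # model name taken from the docstring example
        transcript=transcript(),
        output_format={"container": "raw", "encoding": "pcm_f32le", "sample_rate": 44100},
        voice_id="YOUR_VOICE_ID",        # placeholder; voice_embedding=[...] also works
    ):
        audio.extend(out["audio"])       # each yielded dict carries audio bytes and the context_id

    ws.close()
    return bytes(audio)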
@@ -0,0 +1,82 @@
+ import asyncio
+ from types import TracebackType
+ from typing import Optional, Union
+
+ import aiohttp
+
+ from cartesia._constants import DEFAULT_NUM_CONNECTIONS, DEFAULT_TIMEOUT
+ from cartesia.async_tts import AsyncTTS
+ from cartesia.client import Cartesia
+
+
+ class AsyncCartesia(Cartesia):
+     """The asynchronous version of the Cartesia client."""
+
+     def __init__(
+         self,
+         *,
+         api_key: Optional[str] = None,
+         base_url: Optional[str] = None,
+         timeout: float = DEFAULT_TIMEOUT,
+         max_num_connections: int = DEFAULT_NUM_CONNECTIONS,
+     ):
+         """
+         Args:
+             api_key: See :class:`Cartesia`.
+             base_url: See :class:`Cartesia`.
+             timeout: See :class:`Cartesia`.
+             max_num_connections: The maximum number of concurrent connections to use for the client.
+                 This is used to limit the number of connections that can be made to the server.
+         """
+         self._session = None
+         self._loop = None
+         super().__init__(api_key=api_key, base_url=base_url, timeout=timeout)
+         self.max_num_connections = max_num_connections
+         self.tts = AsyncTTS(
+             api_key=self.api_key,
+             base_url=self._base_url,
+             timeout=self.timeout,
+             get_session=self._get_session,
+         )
+
+     async def _get_session(self):
+         current_loop = asyncio.get_event_loop()
+         if self._loop is not current_loop:
+             # If the loop has changed, close the session and create a new one.
+             await self.close()
+         if self._session is None or self._session.closed:
+             timeout = aiohttp.ClientTimeout(total=self.timeout)
+             connector = aiohttp.TCPConnector(limit=self.max_num_connections)
+             self._session = aiohttp.ClientSession(timeout=timeout, connector=connector)
+             self._loop = current_loop
+         return self._session
+
+     async def close(self):
+         """This method closes the session.
+
+         It is *strongly* recommended to call this method when you are done using the client.
+         """
+         if self._session is not None and not self._session.closed:
+             await self._session.close()
+
+     def __del__(self):
+         try:
+             loop = asyncio.get_running_loop()
+         except RuntimeError:
+             loop = None
+
+         if loop is None:
+             asyncio.run(self.close())
+         elif loop.is_running():
+             loop.create_task(self.close())
+
+     async def __aenter__(self):
+         return self
+
+     async def __aexit__(
+         self,
+         exc_type: Union[type, None],
+         exc: Union[BaseException, None],
+         exc_tb: Union[TracebackType, None],
+     ):
+         await self.close()
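A minimal usage sketch for the AsyncCartesia lifecycle above, assuming the class is exported from the package root (the import path is not visible in this diff) and using a placeholder API key:

import asyncio

from cartesia import AsyncCartesia  # assumed export; the module path is not shown in this hunk


async def main() -> None:
    # __aenter__/__aexit__ above guarantee close() runs, releasing the shared aiohttp session
    # that _get_session() creates with the configured connection limit.
    async with AsyncCartesia(api_key="YOUR_API_KEY", max_num_connections=5) as client:
        ws = await client.tts.websocket()  # AsyncTTS.websocket() (next file) awaits connect() before returning
        # ... stream audio over ws ...


asyncio.run(main())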
cartesia/async_tts.py ADDED
@@ -0,0 +1,22 @@
+ from cartesia._async_sse import _AsyncSSE
+ from cartesia._async_websocket import _AsyncWebSocket
+ from cartesia.tts import TTS
+
+
+ class AsyncTTS(TTS):
+     def __init__(self, api_key, base_url, timeout, get_session):
+         super().__init__(api_key, base_url, timeout)
+         self._get_session = get_session
+         self._sse_class = _AsyncSSE(self._http_url(), self.headers, self.timeout, get_session)
+         self.sse = self._sse_class.send
+
+     async def websocket(self) -> _AsyncWebSocket:
+         ws = _AsyncWebSocket(
+             self._ws_url(),
+             self.api_key,
+             self.cartesia_version,
+             self.timeout,
+             self._get_session,
+         )
+         await ws.connect()
+         return ws
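Closing the loop with the synchronous client: a hedged sketch of the non-streaming path in _WebSocket.send from cartesia/_websocket.py earlier in this diff, where stream=False makes the client concatenate audio chunks and merge word timestamps before returning. The voice ID is a placeholder.

from cartesia.client import Cartesia


def synthesize_with_timestamps(api_key: str) -> dict:
    client = Cartesia(api_key=api_key)
    ws = client.tts.websocket()
    out = ws.send(
        model_id="sonic-english",
        transcript="Hello world!",
        voice_id="YOUR_VOICE_ID",            # placeholder voice
        output_format={"container": "raw", "encoding": "pcm_f32le", "sample_rate": 44100},
        stream=False,                        # aggregate chunks instead of returning a generator
        add_timestamps=True,                 # adds "word_timestamps" to the result
    )
    ws.close()
    # out == {"audio": b"...", "context_id": "...", "word_timestamps": {...}}
    return out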