cartesia 1.0.11__tar.gz → 1.0.13__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: cartesia
-Version: 1.0.11
+Version: 1.0.13
 Summary: The official Python library for the Cartesia API.
 Home-page:
 Author: Cartesia, Inc.
@@ -73,6 +73,11 @@ print("The embedding for", voice["name"], "is", voice["embedding"])
 # Clone a voice using filepath
 cloned_voice_embedding = client.voices.clone(filepath="path/to/voice")
 
+# Mix voices together
+mixed_voice_embedding = client.voices.mix(
+    [{ "id": "voice_id_1", "weight": 0.5 }, { "id": "voice_id_2", "weight": 0.25 }, { "id": "voice_id_3", "weight": 0.25 }]
+)
+
 # Create a new voice
 new_voice = client.voices.create(
     name="New Voice",
@@ -504,6 +509,7 @@ You can enhance the voice output by adjusting the `speed` and `emotion` paramete
 
 Speed Options:
 - `slowest`, `slow`, `normal`, `fast`, `fastest`
+- Float values between -1.0 and 1.0, where -1.0 is the slowest speed and 1.0 is the fastest speed.
 
 Emotion Options:
 Use a list of tags in the format `emotion_name:level` where:
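
The float speeds added above extend the named presets; both forms feed the same voice-controls dictionary that the SDK attaches to a request (see the TTS voice-construction hunk further down, which stores it under "__experimental_controls"). A minimal sketch of such a payload, assuming the public TTS methods expose it through an experimental_voice_controls-style argument; the exact keyword name should be checked against the SDK docs:

# Hypothetical voice-controls payload, for illustration only.
voice_controls = {
    "speed": 0.4,                    # float in [-1.0, 1.0]; -1.0 is slowest, 1.0 is fastest
    "emotion": ["positivity:high"],  # emotion tags use the emotion_name:level format
}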
@@ -56,6 +56,11 @@ print("The embedding for", voice["name"], "is", voice["embedding"])
 # Clone a voice using filepath
 cloned_voice_embedding = client.voices.clone(filepath="path/to/voice")
 
+# Mix voices together
+mixed_voice_embedding = client.voices.mix(
+    [{ "id": "voice_id_1", "weight": 0.5 }, { "id": "voice_id_2", "weight": 0.25 }, { "id": "voice_id_3", "weight": 0.25 }]
+)
+
 # Create a new voice
 new_voice = client.voices.create(
     name="New Voice",
@@ -487,6 +492,7 @@ You can enhance the voice output by adjusting the `speed` and `emotion` paramete
 
 Speed Options:
 - `slowest`, `slow`, `normal`, `fast`, `fastest`
+- Float values between -1.0 and 1.0, where -1.0 is the slowest speed and 1.0 is the fastest speed.
 
 Emotion Options:
 Use a list of tags in the format `emotion_name:level` where:
@@ -32,7 +32,6 @@ except ImportError:
     IS_WEBSOCKET_SYNC_AVAILABLE = False
 
 from iterators import TimeoutIterator
-from websockets.sync.client import connect
 
 from cartesia._types import (
     DeprecatedOutputFormatMapping,
@@ -50,6 +49,7 @@ DEFAULT_BASE_URL = "api.cartesia.ai"
 DEFAULT_CARTESIA_VERSION = "2024-06-10" # latest version
 DEFAULT_TIMEOUT = 30 # seconds
 DEFAULT_NUM_CONNECTIONS = 10 # connections per client
+DEFAULT_VOICE_EMBEDDING = [1.0] * 192 # Default voice embedding is a 192 sized float array
 
 BACKOFF_FACTOR = 1
 MAX_RETRIES = 3
@@ -261,6 +261,40 @@ class Voices(Resource):
 
         return response.json()
 
+    def mix(self, voices: List[Dict[str, Union[str, float]]]) -> List[float]:
+        """Mix multiple voices together.
+
+        Args:
+            voices: A list of dictionaries, each containing either:
+                - 'id': The ID of an existing voice
+                - 'embedding': A voice embedding
+                AND
+                - 'weight': The weight of the voice in the mix (0.0 to 1.0)
+
+        Returns:
+            The embedding of the mixed voice as a list of floats.
+
+        Raises:
+            ValueError: If the request fails or if the input is invalid.
+        """
+        url = f"{self._http_url()}/voices/mix"
+
+        if not voices or not isinstance(voices, list):
+            raise ValueError("voices must be a non-empty list")
+
+        response = httpx.post(
+            url,
+            headers=self.headers,
+            json={"voices": voices},
+            timeout=self.timeout,
+        )
+
+        if not response.is_success:
+            raise ValueError(f"Failed to mix voices. Error: {response.text}")
+
+        result = response.json()
+        return result["embedding"]
+
 
 class _TTSContext:
     """Manage a single context over a WebSocket.
@@ -857,15 +891,17 @@ class TTS(Resource):
         if voice_id is None and voice_embedding is None:
            raise ValueError("Either voice_id or voice_embedding must be specified.")
 
-        if voice_id is not None and voice_embedding is not None:
-            raise ValueError("Only one of voice_id or voice_embedding should be specified.")
+        voice = {}
+
+        if voice_id is not None:
+            voice["id"] = voice_id
+
+        if voice_embedding is not None:
+            voice["embedding"] = voice_embedding
 
-        if voice_id:
-            voice = {"mode": "id", "id": voice_id}
-        else:
-            voice = {"mode": "embedding", "embedding": voice_embedding}
         if experimental_voice_controls is not None:
             voice["__experimental_controls"] = experimental_voice_controls
+
         return voice
 
 
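
The rewrite above no longer rejects supplying both voice_id and voice_embedding and drops the explicit "mode" field: the voice payload is now built additively. A standalone sketch of the new behavior (an illustration, not the SDK's actual helper):

def build_voice_payload(voice_id=None, voice_embedding=None, experimental_voice_controls=None):
    # Mirrors the hunk above: id and embedding may now be set together.
    if voice_id is None and voice_embedding is None:
        raise ValueError("Either voice_id or voice_embedding must be specified.")

    voice = {}
    if voice_id is not None:
        voice["id"] = voice_id
    if voice_embedding is not None:
        voice["embedding"] = voice_embedding
    if experimental_voice_controls is not None:
        voice["__experimental_controls"] = experimental_voice_controls
    return voice

# Prints {'id': 'voice_id_1', '__experimental_controls': {'speed': 0.4}}
print(build_voice_payload(voice_id="voice_id_1", experimental_voice_controls={"speed": 0.4}))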
@@ -1129,7 +1165,7 @@ class _AsyncTTSContext:
             model_id=DEFAULT_MODEL_ID,
             transcript="",
             output_format=TTS.get_output_format("raw_pcm_f32le_44100"),
-            voice_id="a0e99841-438c-4a64-b679-ae501e7d6091", # Default voice ID since it's a required input for now
+            voice_embedding=DEFAULT_VOICE_EMBEDDING, # Default voice embedding since it's a required input for now.
            context_id=self._context_id,
             continue_=False,
         )
@@ -0,0 +1 @@
+__version__ = "1.0.13"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: cartesia
-Version: 1.0.11
+Version: 1.0.13
 Summary: The official Python library for the Cartesia API.
 Home-page:
 Author: Cartesia, Inc.
@@ -73,6 +73,11 @@ print("The embedding for", voice["name"], "is", voice["embedding"])
 # Clone a voice using filepath
 cloned_voice_embedding = client.voices.clone(filepath="path/to/voice")
 
+# Mix voices together
+mixed_voice_embedding = client.voices.mix(
+    [{ "id": "voice_id_1", "weight": 0.5 }, { "id": "voice_id_2", "weight": 0.25 }, { "id": "voice_id_3", "weight": 0.25 }]
+)
+
 # Create a new voice
 new_voice = client.voices.create(
     name="New Voice",
@@ -504,6 +509,7 @@ You can enhance the voice output by adjusting the `speed` and `emotion` paramete
 
 Speed Options:
 - `slowest`, `slow`, `normal`, `fast`, `fastest`
+- Float values between -1.0 and 1.0, where -1.0 is the slowest speed and 1.0 is the fastest speed.
 
 Emotion Options:
 Use a list of tags in the format `emotion_name:level` where: