together 1.5.28__py3-none-any.whl → 1.5.30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -619,14 +619,29 @@ class APIRequestor:
619
619
  ) -> Tuple[TogetherResponse | Iterator[TogetherResponse], bool]:
620
620
  """Returns the response(s) and a bool indicating whether it is a stream."""
621
621
  content_type = result.headers.get("Content-Type", "")
622
+
622
623
  if stream and "text/event-stream" in content_type:
624
+ # SSE format streaming
623
625
  return (
624
626
  self._interpret_response_line(
625
627
  line, result.status_code, result.headers, stream=True
626
628
  )
627
629
  for line in parse_stream(result.iter_lines())
628
630
  ), True
631
+ elif stream and content_type in [
632
+ "audio/wav",
633
+ "audio/mpeg",
634
+ "application/octet-stream",
635
+ ]:
636
+ # Binary audio streaming - return chunks as binary data
637
+ def binary_stream_generator() -> Iterator[TogetherResponse]:
638
+ for chunk in result.iter_content(chunk_size=8192):
639
+ if chunk: # Skip empty chunks
640
+ yield TogetherResponse(chunk, dict(result.headers))
641
+
642
+ return binary_stream_generator(), True
629
643
  else:
644
+ # Non-streaming response
630
645
  if content_type in ["application/octet-stream", "audio/wav", "audio/mpeg"]:
631
646
  content = result.content
632
647
  else:
@@ -648,23 +663,49 @@ class APIRequestor:
648
663
  | tuple[TogetherResponse, bool]
649
664
  ):
650
665
  """Returns the response(s) and a bool indicating whether it is a stream."""
651
- if stream and "text/event-stream" in result.headers.get("Content-Type", ""):
666
+ content_type = result.headers.get("Content-Type", "")
667
+
668
+ if stream and "text/event-stream" in content_type:
669
+ # SSE format streaming
652
670
  return (
653
671
  self._interpret_response_line(
654
672
  line, result.status, result.headers, stream=True
655
673
  )
656
674
  async for line in parse_stream_async(result.content)
657
675
  ), True
676
+ elif stream and content_type in [
677
+ "audio/wav",
678
+ "audio/mpeg",
679
+ "application/octet-stream",
680
+ ]:
681
+ # Binary audio streaming - return chunks as binary data
682
+ async def binary_stream_generator() -> (
683
+ AsyncGenerator[TogetherResponse, None]
684
+ ):
685
+ async for chunk in result.content.iter_chunked(8192):
686
+ if chunk: # Skip empty chunks
687
+ yield TogetherResponse(chunk, dict(result.headers))
688
+
689
+ return binary_stream_generator(), True
658
690
  else:
691
+ # Non-streaming response
659
692
  try:
660
- await result.read()
693
+ content = await result.read()
661
694
  except (aiohttp.ServerTimeoutError, asyncio.TimeoutError) as e:
662
695
  raise error.Timeout("Request timed out") from e
663
696
  except aiohttp.ClientError as e:
664
697
  utils.log_warn(e, body=result.content)
698
+
699
+ if content_type in ["application/octet-stream", "audio/wav", "audio/mpeg"]:
700
+ # Binary content - keep as bytes
701
+ response_content: str | bytes = content
702
+ else:
703
+ # Text content - decode to string
704
+ response_content = content.decode("utf-8")
705
+
665
706
  return (
666
707
  self._interpret_response_line(
667
- (await result.read()).decode("utf-8"),
708
+ response_content,
668
709
  result.status,
669
710
  result.headers,
670
711
  stream=False,
@@ -132,6 +132,10 @@ def endpoints(ctx: click.Context) -> None:
132
132
  type=int,
133
133
  help="Number of minutes of inactivity after which the endpoint will be automatically stopped. Set to 0 to disable.",
134
134
  )
135
+ @click.option(
136
+ "--availability-zone",
137
+ help="Start endpoint in specified availability zone (e.g., us-central-4b)",
138
+ )
135
139
  @click.option(
136
140
  "--wait",
137
141
  is_flag=True,
@@ -152,6 +156,7 @@ def create(
152
156
  no_speculative_decoding: bool,
153
157
  no_auto_start: bool,
154
158
  inactive_timeout: int | None,
159
+ availability_zone: str | None,
155
160
  wait: bool,
156
161
  ) -> None:
157
162
  """Create a new dedicated inference endpoint."""
@@ -177,6 +182,7 @@ def create(
177
182
  disable_speculative_decoding=no_speculative_decoding,
178
183
  state="STOPPED" if no_auto_start else "STARTED",
179
184
  inactive_timeout=inactive_timeout,
185
+ availability_zone=availability_zone,
180
186
  )
181
187
  except InvalidRequestError as e:
182
188
  print_api_error(e)
@@ -203,6 +209,8 @@ def create(
203
209
  click.echo(" Auto-start: disabled", err=True)
204
210
  if inactive_timeout is not None:
205
211
  click.echo(f" Inactive timeout: {inactive_timeout} minutes", err=True)
212
+ if availability_zone:
213
+ click.echo(f" Availability zone: {availability_zone}", err=True)
206
214
 
207
215
  click.echo(f"Endpoint created successfully, id: {response.id}", err=True)
208
216
 
@@ -337,13 +345,30 @@ def delete(client: Together, endpoint_id: str) -> None:
337
345
  type=click.Choice(["dedicated", "serverless"]),
338
346
  help="Filter by endpoint type",
339
347
  )
348
+ @click.option(
349
+ "--mine",
350
+ type=click.BOOL,
351
+ default=None,
352
+ help="true (only mine), default=all",
353
+ )
354
+ @click.option(
355
+ "--usage-type",
356
+ type=click.Choice(["on-demand", "reserved"]),
357
+ help="Filter by endpoint usage type",
358
+ )
340
359
  @click.pass_obj
341
360
  @handle_api_errors
342
361
  def list(
343
- client: Together, json: bool, type: Literal["dedicated", "serverless"] | None
362
+ client: Together,
363
+ json: bool,
364
+ type: Literal["dedicated", "serverless"] | None,
365
+ usage_type: Literal["on-demand", "reserved"] | None,
366
+ mine: bool | None,
344
367
  ) -> None:
345
368
  """List all inference endpoints (includes both dedicated and serverless endpoints)."""
346
- endpoints: List[ListEndpoint] = client.endpoints.list(type=type)
369
+ endpoints: List[ListEndpoint] = client.endpoints.list(
370
+ type=type, usage_type=usage_type, mine=mine
371
+ )
347
372
 
348
373
  if not endpoints:
349
374
  click.echo("No dedicated endpoints found", err=True)
@@ -432,3 +457,25 @@ def update(
432
457
 
433
458
  click.echo("Successfully updated endpoint", err=True)
434
459
  click.echo(endpoint_id)
460
+
461
+
462
@endpoints.command()
@click.option("--json", is_flag=True, help="Print output in JSON format")
@click.pass_obj
@handle_api_errors
def availability_zones(client: Together, json: bool) -> None:
    """List all availability zones."""
    zones = client.endpoints.list_avzones()

    # Nothing to show — report on stderr and bail out early.
    if not zones:
        click.echo("No availability zones found", err=True)
        return

    if not json:
        # Human-readable listing: header on stderr, zone names on stdout.
        click.echo("Available zones:", err=True)
        for availability_zone in sorted(zones):
            click.echo(f" {availability_zone}")
        return

    # Local alias import: the `json` parameter shadows the stdlib module name.
    import json as json_lib

    click.echo(json_lib.dumps({"avzones": zones}, indent=2))
@@ -3,6 +3,7 @@ from functools import cached_property
3
3
  from together.resources.audio.speech import AsyncSpeech, Speech
4
4
  from together.resources.audio.transcriptions import AsyncTranscriptions, Transcriptions
5
5
  from together.resources.audio.translations import AsyncTranslations, Translations
6
+ from together.resources.audio.voices import AsyncVoices, Voices
6
7
  from together.types import (
7
8
  TogetherClient,
8
9
  )
@@ -24,6 +25,10 @@ class Audio:
24
25
  def translations(self) -> Translations:
25
26
  return Translations(self._client)
26
27
 
28
+ @cached_property
29
+ def voices(self) -> Voices:
30
+ return Voices(self._client)
31
+
27
32
 
28
33
  class AsyncAudio:
29
34
  def __init__(self, client: TogetherClient) -> None:
@@ -40,3 +45,7 @@ class AsyncAudio:
40
45
  @cached_property
41
46
  def translations(self) -> AsyncTranslations:
42
47
  return AsyncTranslations(self._client)
48
+
49
+ @cached_property
50
+ def voices(self) -> AsyncVoices:
51
+ return AsyncVoices(self._client)
@@ -30,7 +30,7 @@ class Speech:
30
30
  response_format: str = "wav",
31
31
  language: str = "en",
32
32
  response_encoding: str = "pcm_f32le",
33
- sample_rate: int = 44100,
33
+ sample_rate: int | None = None,
34
34
  stream: bool = False,
35
35
  **kwargs: Any,
36
36
  ) -> AudioSpeechStreamResponse:
@@ -49,7 +49,7 @@ class Speech:
49
49
  response_encoding (str, optional): Audio encoding of response.
50
50
  Defaults to "pcm_f32le".
51
51
  sample_rate (int, optional): Sampling rate to use for the output audio.
52
- Defaults to 44100.
52
+ Defaults to None. If not provided, the default sampling rate for the model will be used.
53
53
  stream (bool, optional): If true, output is streamed for several characters at a time.
54
54
  Defaults to False.
55
55
 
@@ -57,6 +57,12 @@ class Speech:
57
57
  Union[bytes, Iterator[AudioSpeechStreamChunk]]: The generated audio as bytes or an iterator over audio stream chunks.
58
58
  """
59
59
 
60
+ if sample_rate is None:
61
+ if "cartesia" in model:
62
+ sample_rate = 44100
63
+ else:
64
+ sample_rate = 24000
65
+
60
66
  requestor = api_requestor.APIRequestor(
61
67
  client=self._client,
62
68
  )
@@ -30,6 +30,7 @@ class Transcriptions:
30
30
  timestamp_granularities: Optional[
31
31
  Union[str, AudioTimestampGranularities]
32
32
  ] = None,
33
+ diarize: bool = False,
33
34
  **kwargs: Any,
34
35
  ) -> Union[AudioTranscriptionResponse, AudioTranscriptionVerboseResponse]:
35
36
  """
@@ -52,7 +53,11 @@ class Transcriptions:
52
53
  timestamp_granularities: The timestamp granularities to populate for this
53
54
  transcription. response_format must be set verbose_json to use timestamp
54
55
  granularities. Either or both of these options are supported: word, or segment.
55
-
56
+ diarize: Whether to enable speaker diarization. When enabled, you will get the speaker id for each word in the transcription.
57
+ In the response, in the words array, you will get the speaker id for each word.
58
+ In addition, we also return the speaker_segments array which contains the speaker id for each speaker segment along with the start and end time of the segment along with all the words in the segment.
59
+ You can use the speaker_id to group the words by speaker.
60
+ You can use the speaker_segments to get the start and end time of each speaker segment.
56
61
  Returns:
57
62
  The transcribed text in the requested format.
58
63
  """
@@ -103,6 +108,9 @@ class Transcriptions:
103
108
  else timestamp_granularities
104
109
  )
105
110
 
111
+ if diarize:
112
+ params_data["diarize"] = diarize
113
+
106
114
  # Add any additional kwargs
107
115
  # Convert boolean values to lowercase strings for proper form encoding
108
116
  for key, value in kwargs.items():
@@ -135,6 +143,7 @@ class Transcriptions:
135
143
  if (
136
144
  response_format == "verbose_json"
137
145
  or response_format == AudioTranscriptionResponseFormat.VERBOSE_JSON
146
+ or diarize
138
147
  ):
139
148
  # Create response with model validation that preserves extra fields
140
149
  return AudioTranscriptionVerboseResponse.model_validate(response.data)
@@ -158,6 +167,7 @@ class AsyncTranscriptions:
158
167
  timestamp_granularities: Optional[
159
168
  Union[str, AudioTimestampGranularities]
160
169
  ] = None,
170
+ diarize: bool = False,
161
171
  **kwargs: Any,
162
172
  ) -> Union[AudioTranscriptionResponse, AudioTranscriptionVerboseResponse]:
163
173
  """
@@ -180,7 +190,11 @@ class AsyncTranscriptions:
180
190
  timestamp_granularities: The timestamp granularities to populate for this
181
191
  transcription. response_format must be set verbose_json to use timestamp
182
192
  granularities. Either or both of these options are supported: word, or segment.
183
-
193
+ diarize: Whether to enable speaker diarization. When enabled, you will get the speaker id for each word in the transcription.
194
+ In the response, in the words array, you will get the speaker id for each word.
195
+ In addition, we also return the speaker_segments array which contains the speaker id for each speaker segment along with the start and end time of the segment along with all the words in the segment.
196
+ You can use the speaker_id to group the words by speaker.
197
+ You can use the speaker_segments to get the start and end time of each speaker segment.
184
198
  Returns:
185
199
  The transcribed text in the requested format.
186
200
  """
@@ -239,6 +253,9 @@ class AsyncTranscriptions:
239
253
  )
240
254
  )
241
255
 
256
+ if diarize:
257
+ params_data["diarize"] = diarize
258
+
242
259
  # Add any additional kwargs
243
260
  # Convert boolean values to lowercase strings for proper form encoding
244
261
  for key, value in kwargs.items():
@@ -271,6 +288,7 @@ class AsyncTranscriptions:
271
288
  if (
272
289
  response_format == "verbose_json"
273
290
  or response_format == AudioTranscriptionResponseFormat.VERBOSE_JSON
291
+ or diarize
274
292
  ):
275
293
  # Create response with model validation that preserves extra fields
276
294
  return AudioTranscriptionVerboseResponse.model_validate(response.data)
@@ -0,0 +1,65 @@
1
+ from __future__ import annotations
2
+
3
+ from together.abstract import api_requestor
4
+ from together.together_response import TogetherResponse
5
+ from together.types import (
6
+ TogetherClient,
7
+ TogetherRequest,
8
+ VoiceListResponse,
9
+ )
10
+
11
+
12
class Voices:
    """Synchronous accessor for the voice-listing API."""

    def __init__(self, client: TogetherClient) -> None:
        self._client = client

    def list(self) -> VoiceListResponse:
        """Fetch the models and the voices each one supports.

        Returns:
            VoiceListResponse: Models paired with their available voices.
        """
        response, _, _ = api_requestor.APIRequestor(client=self._client).request(
            options=TogetherRequest(method="GET", url="voices"),
            stream=False,
        )

        assert isinstance(response, TogetherResponse)

        return VoiceListResponse(**response.data)
38
+
39
+
40
class AsyncVoices:
    """Asynchronous accessor for the voice-listing API."""

    def __init__(self, client: TogetherClient) -> None:
        self._client = client

    async def list(self) -> VoiceListResponse:
        """Fetch the models and the voices each one supports (async).

        Returns:
            VoiceListResponse: Models paired with their available voices.
        """
        response, _, _ = await api_requestor.APIRequestor(
            client=self._client
        ).arequest(
            options=TogetherRequest(method="GET", url="voices"),
            stream=False,
        )

        assert isinstance(response, TogetherResponse)

        return VoiceListResponse(**response.data)
@@ -13,13 +13,18 @@ class Endpoints:
13
13
  self._client = client
14
14
 
15
15
  def list(
16
- self, type: Optional[Literal["dedicated", "serverless"]] = None
16
+ self,
17
+ type: Optional[Literal["dedicated", "serverless"]] = None,
18
+ usage_type: Optional[Literal["on-demand", "reserved"]] = None,
19
+ mine: Optional[bool] = None,
17
20
  ) -> List[ListEndpoint]:
18
21
  """
19
- List all endpoints, can be filtered by type.
22
+ List all endpoints, can be filtered by endpoint type and ownership.
20
23
 
21
24
  Args:
22
- type (str, optional): Filter endpoints by type ("dedicated" or "serverless"). Defaults to None.
25
+ type (str, optional): Filter endpoints by endpoint type ("dedicated" or "serverless"). Defaults to None.
26
+ usage_type (str, optional): Filter endpoints by usage type ("on-demand" or "reserved"). Defaults to None.
27
+ mine (bool, optional): If True, return only endpoints owned by the caller. Defaults to None.
23
28
 
24
29
  Returns:
25
30
  List[ListEndpoint]: List of endpoint objects
@@ -28,9 +33,20 @@ class Endpoints:
28
33
  client=self._client,
29
34
  )
30
35
 
31
- params = {}
36
+ params: Dict[
37
+ str,
38
+ Union[
39
+ Literal["dedicated", "serverless"],
40
+ Literal["on-demand", "reserved"],
41
+ bool,
42
+ ],
43
+ ] = {}
32
44
  if type is not None:
33
45
  params["type"] = type
46
+ if usage_type is not None:
47
+ params["usage_type"] = usage_type
48
+ if mine is not None:
49
+ params["mine"] = mine
34
50
 
35
51
  response, _, _ = requestor.request(
36
52
  options=TogetherRequest(
@@ -60,6 +76,7 @@ class Endpoints:
60
76
  disable_speculative_decoding: bool = True,
61
77
  state: Literal["STARTED", "STOPPED"] = "STARTED",
62
78
  inactive_timeout: Optional[int] = None,
79
+ availability_zone: Optional[str] = None,
63
80
  ) -> DedicatedEndpoint:
64
81
  """
65
82
  Create a new dedicated endpoint.
@@ -74,6 +91,7 @@ class Endpoints:
74
91
  disable_speculative_decoding (bool, optional): Whether to disable speculative decoding. Defaults to False.
75
92
  state (str, optional): The desired state of the endpoint. Defaults to "STARTED".
76
93
  inactive_timeout (int, optional): The number of minutes of inactivity after which the endpoint will be automatically stopped. Set to 0 to disable automatic timeout.
94
+ availability_zone (str, optional): Start endpoint in specified availability zone (e.g., us-central-4b).
77
95
 
78
96
  Returns:
79
97
  DedicatedEndpoint: Object containing endpoint information
@@ -100,6 +118,9 @@ class Endpoints:
100
118
  if inactive_timeout is not None:
101
119
  data["inactive_timeout"] = inactive_timeout
102
120
 
121
+ if availability_zone is not None:
122
+ data["availability_zone"] = availability_zone
123
+
103
124
  response, _, _ = requestor.request(
104
125
  options=TogetherRequest(
105
126
  method="POST",
@@ -257,19 +278,49 @@ class Endpoints:
257
278
 
258
279
  return [HardwareWithStatus(**item) for item in response.data["data"]]
259
280
 
281
+ def list_avzones(self) -> List[str]:
282
+ """
283
+ List all available availability zones.
284
+
285
+ Returns:
286
+ List[str]: List of unique availability zones
287
+ """
288
+ requestor = api_requestor.APIRequestor(
289
+ client=self._client,
290
+ )
291
+
292
+ response, _, _ = requestor.request(
293
+ options=TogetherRequest(
294
+ method="GET",
295
+ url="clusters/availability-zones",
296
+ ),
297
+ stream=False,
298
+ )
299
+
300
+ assert isinstance(response, TogetherResponse)
301
+ assert isinstance(response.data, dict)
302
+ assert isinstance(response.data["avzones"], list)
303
+
304
+ return response.data["avzones"]
305
+
260
306
 
261
307
  class AsyncEndpoints:
262
308
  def __init__(self, client: TogetherClient) -> None:
263
309
  self._client = client
264
310
 
265
311
  async def list(
266
- self, type: Optional[Literal["dedicated", "serverless"]] = None
312
+ self,
313
+ type: Optional[Literal["dedicated", "serverless"]] = None,
314
+ usage_type: Optional[Literal["on-demand", "reserved"]] = None,
315
+ mine: Optional[bool] = None,
267
316
  ) -> List[ListEndpoint]:
268
317
  """
269
- List all endpoints, can be filtered by type.
318
+ List all endpoints, can be filtered by type and ownership.
270
319
 
271
320
  Args:
272
321
  type (str, optional): Filter endpoints by type ("dedicated" or "serverless"). Defaults to None.
322
+ usage_type (str, optional): Filter endpoints by usage type ("on-demand" or "reserved"). Defaults to None.
323
+ mine (bool, optional): If True, return only endpoints owned by the caller. Defaults to None.
273
324
 
274
325
  Returns:
275
326
  List[ListEndpoint]: List of endpoint objects
@@ -278,9 +329,20 @@ class AsyncEndpoints:
278
329
  client=self._client,
279
330
  )
280
331
 
281
- params = {}
332
+ params: Dict[
333
+ str,
334
+ Union[
335
+ Literal["dedicated", "serverless"],
336
+ Literal["on-demand", "reserved"],
337
+ bool,
338
+ ],
339
+ ] = {}
282
340
  if type is not None:
283
341
  params["type"] = type
342
+ if usage_type is not None:
343
+ params["usage_type"] = usage_type
344
+ if mine is not None:
345
+ params["mine"] = mine
284
346
 
285
347
  response, _, _ = await requestor.arequest(
286
348
  options=TogetherRequest(
@@ -308,6 +370,7 @@ class AsyncEndpoints:
308
370
  disable_speculative_decoding: bool = True,
309
371
  state: Literal["STARTED", "STOPPED"] = "STARTED",
310
372
  inactive_timeout: Optional[int] = None,
373
+ availability_zone: Optional[str] = None,
311
374
  ) -> DedicatedEndpoint:
312
375
  """
313
376
  Create a new dedicated endpoint.
@@ -348,6 +411,9 @@ class AsyncEndpoints:
348
411
  if inactive_timeout is not None:
349
412
  data["inactive_timeout"] = inactive_timeout
350
413
 
414
+ if availability_zone is not None:
415
+ data["availability_zone"] = availability_zone
416
+
351
417
  response, _, _ = await requestor.arequest(
352
418
  options=TogetherRequest(
353
419
  method="POST",
@@ -506,3 +572,28 @@ class AsyncEndpoints:
506
572
  assert isinstance(response.data["data"], list)
507
573
 
508
574
  return [HardwareWithStatus(**item) for item in response.data["data"]]
575
+
576
+ async def list_avzones(self) -> List[str]:
577
+ """
578
+ List all availability zones.
579
+
580
+ Returns:
581
+ List[str]: List of unique availability zones
582
+ """
583
+ requestor = api_requestor.APIRequestor(
584
+ client=self._client,
585
+ )
586
+
587
+ response, _, _ = await requestor.arequest(
588
+ options=TogetherRequest(
589
+ method="GET",
590
+ url="clusters/availability-zones",
591
+ ),
592
+ stream=False,
593
+ )
594
+
595
+ assert isinstance(response, TogetherResponse)
596
+ assert isinstance(response.data, dict)
597
+ assert isinstance(response.data["avzones"], list)
598
+
599
+ return response.data["avzones"]
@@ -21,7 +21,6 @@ class Images:
21
21
  *,
22
22
  prompt: str,
23
23
  model: str,
24
- steps: int | None = 20,
25
24
  seed: int | None = None,
26
25
  n: int | None = 1,
27
26
  height: int | None = 1024,
@@ -37,8 +36,6 @@ class Images:
37
36
 
38
37
  model (str, optional): The model to use for image generation.
39
38
 
40
- steps (int, optional): Number of generation steps. Defaults to 20
41
-
42
39
  seed (int, optional): Seed used for generation. Can be used to reproduce image generations.
43
40
  Defaults to None.
44
41
 
@@ -51,7 +48,8 @@ class Images:
51
48
  negative_prompt (str, optional): The prompt or prompts not to guide the image generation.
52
49
  Defaults to None
53
50
 
54
- image_base64: (str, optional): Reference image used for generation. Defaults to None.
51
+ **kwargs: Additional parameters like steps (int, optional): Number of generation steps,
52
+ image_base64 (str, optional): Reference image used for generation, etc.
55
53
 
56
54
  Returns:
57
55
  ImageResponse: Object containing image data
@@ -64,7 +62,6 @@ class Images:
64
62
  parameter_payload = ImageRequest(
65
63
  prompt=prompt,
66
64
  model=model,
67
- steps=steps,
68
65
  seed=seed,
69
66
  n=n,
70
67
  height=height,
@@ -96,7 +93,6 @@ class AsyncImages:
96
93
  *,
97
94
  prompt: str,
98
95
  model: str,
99
- steps: int | None = 20,
100
96
  seed: int | None = None,
101
97
  n: int | None = 1,
102
98
  height: int | None = 1024,
@@ -112,8 +108,6 @@ class AsyncImages:
112
108
 
113
109
  model (str, optional): The model to use for image generation.
114
110
 
115
- steps (int, optional): Number of generation steps. Defaults to 20
116
-
117
111
  seed (int, optional): Seed used for generation. Can be used to reproduce image generations.
118
112
  Defaults to None.
119
113
 
@@ -126,7 +120,8 @@ class AsyncImages:
126
120
  negative_prompt (str, optional): The prompt or prompts not to guide the image generation.
127
121
  Defaults to None
128
122
 
129
- image_base64: (str, optional): Reference image used for generation. Defaults to None.
123
+ **kwargs: Additional parameters like steps (int, optional): Number of generation steps,
124
+ image_base64 (str, optional): Reference image used for generation, etc.
130
125
 
131
126
  Returns:
132
127
  ImageResponse: Object containing image data
@@ -139,7 +134,6 @@ class AsyncImages:
139
134
  parameter_payload = ImageRequest(
140
135
  prompt=prompt,
141
136
  model=model,
142
- steps=steps,
143
137
  seed=seed,
144
138
  n=n,
145
139
  height=height,
@@ -15,6 +15,8 @@ from together.types.audio_speech import (
15
15
  AudioTranslationVerboseResponse,
16
16
  AudioTranscriptionResponseFormat,
17
17
  AudioTimestampGranularities,
18
+ ModelVoices,
19
+ VoiceListResponse,
18
20
  )
19
21
  from together.types.chat_completions import (
20
22
  ChatCompletionChunk,
@@ -140,6 +142,8 @@ __all__ = [
140
142
  "AudioTranslationVerboseResponse",
141
143
  "AudioTranscriptionResponseFormat",
142
144
  "AudioTimestampGranularities",
145
+ "ModelVoices",
146
+ "VoiceListResponse",
143
147
  "DedicatedEndpoint",
144
148
  "ListEndpoint",
145
149
  "Autoscaling",
@@ -2,7 +2,8 @@ from __future__ import annotations
2
2
 
3
3
  import base64
4
4
  from enum import Enum
5
- from typing import BinaryIO, Iterator, List, Optional, Union
5
+ from re import S
6
+ from typing import BinaryIO, Dict, Iterator, List, Optional, Union
6
7
 
7
8
  from pydantic import BaseModel, ConfigDict
8
9
 
@@ -82,27 +83,126 @@ class AudioSpeechStreamResponse(BaseModel):
82
83
 
83
84
  model_config = ConfigDict(arbitrary_types_allowed=True)
84
85
 
85
- def stream_to_file(self, file_path: str) -> None:
86
+ def stream_to_file(
87
+ self, file_path: str, response_format: AudioResponseFormat | str | None = None
88
+ ) -> None:
89
+ """
90
+ Save the audio response to a file.
91
+
92
+ For non-streaming responses, writes the complete file as received.
93
+ For streaming responses, collects binary chunks and constructs a valid
94
+ file format based on the response_format parameter.
95
+
96
+ Args:
97
+ file_path: Path where the audio file should be saved.
98
+ response_format: Format of the audio (wav, mp3, or raw). If not provided,
99
+ will attempt to infer from file extension or default to wav.
100
+ """
101
+ # Determine response format
102
+ if response_format is None:
103
+ # Infer from file extension
104
+ ext = file_path.lower().split(".")[-1] if "." in file_path else ""
105
+ if ext in ["wav"]:
106
+ response_format = AudioResponseFormat.WAV
107
+ elif ext in ["mp3", "mpeg"]:
108
+ response_format = AudioResponseFormat.MP3
109
+ elif ext in ["raw", "pcm"]:
110
+ response_format = AudioResponseFormat.RAW
111
+ else:
112
+ # Default to WAV if unknown
113
+ response_format = AudioResponseFormat.WAV
114
+
115
+ if isinstance(response_format, str):
116
+ response_format = AudioResponseFormat(response_format)
117
+
86
118
  if isinstance(self.response, TogetherResponse):
87
- # save response to file
119
+ # Non-streaming: save complete file
88
120
  with open(file_path, "wb") as f:
89
121
  f.write(self.response.data)
90
122
 
91
123
  elif isinstance(self.response, Iterator):
124
+ # Streaming: collect binary chunks
125
+ audio_chunks = []
126
+ for chunk in self.response:
127
+ if isinstance(chunk.data, bytes):
128
+ audio_chunks.append(chunk.data)
129
+ elif isinstance(chunk.data, dict):
130
+ # SSE format with JSON/base64
131
+ try:
132
+ stream_event = AudioSpeechStreamEventResponse(
133
+ response={"data": chunk.data}
134
+ )
135
+ if isinstance(stream_event.response, StreamSentinel):
136
+ break
137
+ audio_chunks.append(
138
+ base64.b64decode(stream_event.response.data.b64)
139
+ )
140
+ except Exception:
141
+ continue # Skip malformed chunks
142
+
143
+ if not audio_chunks:
144
+ raise ValueError("No audio data received in streaming response")
145
+
146
+ # Concatenate all chunks
147
+ audio_data = b"".join(audio_chunks)
148
+
92
149
  with open(file_path, "wb") as f:
93
- for chunk in self.response:
94
- # Try to parse as stream chunk
95
- stream_event_response = AudioSpeechStreamEventResponse(
96
- response={"data": chunk.data}
150
+ if response_format == AudioResponseFormat.WAV:
151
+ if audio_data.startswith(b"RIFF"):
152
+ # Already a valid WAV file
153
+ f.write(audio_data)
154
+ else:
155
+ # Raw PCM - add WAV header
156
+ self._write_wav_header(f, audio_data)
157
+ elif response_format == AudioResponseFormat.MP3:
158
+ # MP3 format: Check if data is actually MP3 or raw PCM
159
+ # MP3 files start with ID3 tag or sync word (0xFF 0xFB/0xFA/0xF3/0xF2)
160
+ is_mp3 = audio_data.startswith(b"ID3") or (
161
+ len(audio_data) > 0
162
+ and audio_data[0:1] == b"\xff"
163
+ and len(audio_data) > 1
164
+ and audio_data[1] & 0xE0 == 0xE0
97
165
  )
98
166
 
99
- if isinstance(stream_event_response.response, StreamSentinel):
100
- break
101
-
102
- # decode base64
103
- audio = base64.b64decode(stream_event_response.response.data.b64)
104
-
105
- f.write(audio)
167
+ if is_mp3:
168
+ f.write(audio_data)
169
+ else:
170
+ raise ValueError("Invalid MP3 data received.")
171
+ else:
172
+ # RAW format: write PCM data as-is
173
+ f.write(audio_data)
174
+
175
+ @staticmethod
176
+ def _write_wav_header(file_handle: BinaryIO, audio_data: bytes) -> None:
177
+ """
178
+ Write WAV file header for raw PCM audio data.
179
+
180
+ Uses default TTS parameters: 16-bit PCM, mono, 24000 Hz sample rate.
181
+ """
182
+ import struct
183
+
184
+ sample_rate = 24000
185
+ num_channels = 1
186
+ bits_per_sample = 16
187
+ byte_rate = sample_rate * num_channels * bits_per_sample // 8
188
+ block_align = num_channels * bits_per_sample // 8
189
+ data_size = len(audio_data)
190
+
191
+ # Write WAV header
192
+ file_handle.write(b"RIFF")
193
+ file_handle.write(struct.pack("<I", 36 + data_size)) # File size - 8
194
+ file_handle.write(b"WAVE")
195
+ file_handle.write(b"fmt ")
196
+ file_handle.write(struct.pack("<I", 16)) # fmt chunk size
197
+ file_handle.write(struct.pack("<H", 1)) # Audio format (1 = PCM)
198
+ file_handle.write(struct.pack("<H", num_channels))
199
+ file_handle.write(struct.pack("<I", sample_rate))
200
+ file_handle.write(struct.pack("<I", byte_rate))
201
+ file_handle.write(struct.pack("<H", block_align))
202
+ file_handle.write(struct.pack("<H", bits_per_sample))
203
+ file_handle.write(b"data")
204
+ file_handle.write(struct.pack("<I", data_size))
205
+ file_handle.write(audio_data)
106
206
 
107
207
 
108
208
  class AudioTranscriptionResponseFormat(str, Enum):
@@ -196,3 +296,16 @@ class AudioTranslationVerboseResponse(BaseModel):
196
296
  text: str
197
297
  segments: Optional[List[AudioTranscriptionSegment]] = None
198
298
  words: Optional[List[AudioTranscriptionWord]] = None
299
+
300
+
301
class ModelVoices(BaseModel):
    """Represents a model with its available voices."""

    # Name of the model these voices belong to.
    model: str
    # Each voice is a dict with 'name' key; other keys may be present — TODO confirm against API.
    voices: List[Dict[str, str]]
306
+
307
+
308
class VoiceListResponse(BaseModel):
    """Response containing a list of models and their available voices."""

    # One entry per model, each carrying that model's voice list.
    data: List[ModelVoices]
together/types/images.py CHANGED
@@ -10,8 +10,6 @@ class ImageRequest(BaseModel):
10
10
  prompt: str
11
11
  # model to query
12
12
  model: str
13
- # num generation steps
14
- steps: int | None = 20
15
13
  # seed
16
14
  seed: int | None = None
17
15
  # number of results to return
together/utils/files.py CHANGED
@@ -102,81 +102,163 @@ def check_file(
102
102
  return report_dict
103
103
 
104
104
 
105
- def validate_messages(messages: List[Dict[str, str | bool]], idx: int) -> None:
106
- """Validate the messages column."""
105
+ def _check_conversation_type(messages: List[Dict[str, str | bool]], idx: int) -> None:
106
+ """Check that the conversation has correct type.
107
+
108
+ Args:
109
+ messages: The messages in the conversation.
110
+ Can be any type, this function ensures that the messages are a list of dictionaries.
111
+ idx: Line number in the file.
112
+
113
+ Raises:
114
+ InvalidFileFormatError: If the conversation type is invalid.
115
+ """
107
116
  if not isinstance(messages, list):
108
117
  raise InvalidFileFormatError(
109
118
  message=f"Invalid format on line {idx + 1} of the input file. "
110
- f"Expected a list of messages. Found {type(messages)}",
119
+ f"The `messages` column must be a list. Found {type(messages)}",
111
120
  line_number=idx + 1,
112
121
  error_source="key_value",
113
122
  )
114
- if not messages:
123
+ if len(messages) == 0:
115
124
  raise InvalidFileFormatError(
116
125
  message=f"Invalid format on line {idx + 1} of the input file. "
117
- f"Expected a non-empty list of messages. Found empty list",
126
+ f"The `messages` column must not be empty.",
118
127
  line_number=idx + 1,
119
128
  error_source="key_value",
120
129
  )
121
130
 
122
- has_weights = any("weight" in message for message in messages)
123
-
124
- previous_role = None
125
131
  for message in messages:
126
132
  if not isinstance(message, dict):
127
133
  raise InvalidFileFormatError(
128
134
  message=f"Invalid format on line {idx + 1} of the input file. "
129
- f"Expected a dictionary in the messages list. Found {type(message)}",
135
+ f"The `messages` column must be a list of dicts. Found {type(message)}",
130
136
  line_number=idx + 1,
131
137
  error_source="key_value",
132
138
  )
139
+
133
140
  for column in REQUIRED_COLUMNS_MESSAGE:
134
141
  if column not in message:
135
142
  raise InvalidFileFormatError(
136
- message=f"Field `{column}` is missing for a turn `{message}` on line {idx + 1} "
137
- "of the the input file.",
143
+ message=f"Missing required column `{column}` in message on line {idx + 1}.",
138
144
  line_number=idx + 1,
139
145
  error_source="key_value",
140
146
  )
141
- else:
142
- if not isinstance(message[column], str):
143
- raise InvalidFileFormatError(
144
- message=f"Invalid format on line {idx + 1} in the column {column} for turn `{message}` "
145
- f"of the input file. Expected string. Found {type(message[column])}",
146
- line_number=idx + 1,
147
- error_source="text_field",
148
- )
149
-
150
- if has_weights and "weight" in message:
151
- weight = message["weight"]
152
- if not isinstance(weight, int):
153
- raise InvalidFileFormatError(
154
- message="Weight must be an integer",
155
- line_number=idx + 1,
156
- error_source="key_value",
157
- )
158
- if weight not in {0, 1}:
147
+ if not isinstance(message[column], str):
159
148
  raise InvalidFileFormatError(
160
- message="Weight must be either 0 or 1",
149
+ message=f"Column `{column}` is not a string on line {idx + 1}. Found {type(message[column])}",
161
150
  line_number=idx + 1,
162
- error_source="key_value",
151
+ error_source="text_field",
163
152
  )
164
- if message["role"] not in POSSIBLE_ROLES_CONVERSATION:
153
+
154
+
155
+ def _check_conversation_roles(
156
+ require_assistant_role: bool, assistant_role_exists: bool, idx: int
157
+ ) -> None:
158
+ """Check that the conversation has correct roles.
159
+
160
+ Args:
161
+ require_assistant_role: Whether to require at least one assistant role.
162
+ assistant_role_exists: Whether an assistant role exists in the conversation.
163
+ idx: Line number in the file.
164
+
165
+ Raises:
166
+ InvalidFileFormatError: If the conversation roles are invalid.
167
+ """
168
+ if require_assistant_role and not assistant_role_exists:
169
+ raise InvalidFileFormatError(
170
+ message=f"Invalid format on line {idx + 1} of the input file. "
171
+ "At least one message with the assistant role must be present in the example.",
172
+ line_number=idx + 1,
173
+ error_source="key_value",
174
+ )
175
+
176
+
177
+ def _check_message_weight(message: Dict[str, str | bool], idx: int) -> None:
178
+ """Check that the message has a weight with the correct type and value.
179
+
180
+ Args:
181
+ message: The message to check.
182
+ idx: Line number in the file.
183
+
184
+ Raises:
185
+ InvalidFileFormatError: If the message weight is invalid.
186
+ """
187
+ if "weight" in message:
188
+ weight = message["weight"]
189
+ if not isinstance(weight, int):
165
190
  raise InvalidFileFormatError(
166
- message=f"Found invalid role `{message['role']}` in the messages on the line {idx + 1}. "
167
- f"Possible roles in the conversation are: {POSSIBLE_ROLES_CONVERSATION}",
191
+ message=f"Weight must be an integer on line {idx + 1}.",
168
192
  line_number=idx + 1,
169
193
  error_source="key_value",
170
194
  )
171
-
172
- if previous_role == message["role"]:
195
+ if weight not in {0, 1}:
173
196
  raise InvalidFileFormatError(
174
- message=f"Invalid role turns on line {idx + 1} of the input file. "
175
- "`user` and `assistant` roles must alternate user/assistant/user/assistant/...",
197
+ message=f"Weight must be either 0 or 1 on line {idx + 1}.",
176
198
  line_number=idx + 1,
177
199
  error_source="key_value",
178
200
  )
179
- previous_role = message["role"]
201
+
202
+
203
+ def _check_message_role(
204
+ message: Dict[str, str | bool], previous_role: str | None, idx: int
205
+ ) -> str | bool:
206
+ """Check that the message has correct roles.
207
+
208
+ Args:
209
+ message: The message to check.
210
+ previous_role: The role of the previous message.
211
+ idx: Line number in the file.
212
+
213
+ Returns:
214
+ str: The role of the current message.
215
+
216
+ Raises:
217
+ InvalidFileFormatError: If the message role is invalid.
218
+ """
219
+ if message["role"] not in POSSIBLE_ROLES_CONVERSATION:
220
+ raise InvalidFileFormatError(
221
+ message=f"Invalid role `{message['role']}` in conversation on line {idx + 1}. "
222
+ f"Possible roles: {', '.join(POSSIBLE_ROLES_CONVERSATION)}",
223
+ line_number=idx + 1,
224
+ error_source="key_value",
225
+ )
226
+ if previous_role is not None and message["role"] == previous_role:
227
+ raise InvalidFileFormatError(
228
+ message=f"Invalid role turns on line {idx + 1} of the input file. "
229
+ "After the optional system message, conversation roles must alternate between user/assistant/user/assistant.",
230
+ line_number=idx + 1,
231
+ error_source="key_value",
232
+ )
233
+ return message["role"]
234
+
235
+
236
+ def validate_messages(
237
+ messages: List[Dict[str, str | bool]], idx: int, require_assistant_role: bool = True
238
+ ) -> None:
239
+ """Validate the messages column.
240
+
241
+ Args:
242
+ messages: List of message dictionaries to validate.
243
+ idx: Line number in the file.
244
+ require_assistant_role: Whether to require at least one assistant role.
245
+
246
+ Raises:
247
+ InvalidFileFormatError: If the messages are invalid.
248
+ """
249
+ _check_conversation_type(messages, idx)
250
+
251
+ has_weights = any("weight" in message for message in messages)
252
+ previous_role = None
253
+ assistant_role_exists = False
254
+
255
+ for message in messages:
256
+ if has_weights:
257
+ _check_message_weight(message, idx)
258
+ previous_role = _check_message_role(message, previous_role, idx)
259
+ assistant_role_exists |= previous_role == "assistant"
260
+
261
+ _check_conversation_roles(require_assistant_role, assistant_role_exists, idx)
180
262
 
181
263
 
182
264
  def validate_preference_openai(example: Dict[str, Any], idx: int = 0) -> None:
@@ -203,37 +285,73 @@ def validate_preference_openai(example: Dict[str, Any], idx: int = 0) -> None:
203
285
  error_source="key_value",
204
286
  )
205
287
 
206
- validate_messages(example["input"]["messages"], idx)
288
+ validate_messages(example["input"]["messages"], idx, require_assistant_role=False)
289
+
290
+ if example["input"]["messages"][-1]["role"] == "assistant":
291
+ raise InvalidFileFormatError(
292
+ message=f"The last message in the input conversation must not be from the assistant on line {idx + 1}.",
293
+ line_number=idx + 1,
294
+ error_source="key_value",
295
+ )
296
+
297
+ keys = ["preferred_output", "non_preferred_output"]
298
+
299
+ for key in keys:
300
+ if key not in example:
301
+ raise InvalidFileFormatError(
302
+ message=f"The dataset is malformed, the `{key}` field must be present in the input dictionary on line {idx + 1}.",
303
+ line_number=idx + 1,
304
+ error_source="key_value",
305
+ )
306
+
307
+ if not isinstance(example[key], list):
308
+ raise InvalidFileFormatError(
309
+ message=f"The dataset is malformed, the `{key}` field must be a list on line {idx + 1}.",
310
+ line_number=idx + 1,
311
+ error_source="key_value",
312
+ )
313
+
314
+ if len(example[key]) != 1:
315
+ raise InvalidFileFormatError(
316
+ message=f"The dataset is malformed, the `{key}` list must contain exactly one message on line {idx + 1}.",
317
+ line_number=idx + 1,
318
+ error_source="key_value",
319
+ )
207
320
 
208
- for output_field in ["preferred_output", "non_preferred_output"]:
209
- if not isinstance(example[output_field], list):
321
+ if not isinstance(example[key][0], dict):
210
322
  raise InvalidFileFormatError(
211
- message=f"The dataset is malformed, the `{output_field}` field must be a list.",
323
+ message=f"The dataset is malformed, the first element of `{key}` must be a dictionary on line {idx + 1}.",
212
324
  line_number=idx + 1,
213
325
  error_source="key_value",
214
326
  )
215
327
 
216
- if len(example[output_field]) != 1:
328
+ if "role" not in example[key][0]:
217
329
  raise InvalidFileFormatError(
218
- message=f"The dataset is malformed, the `{output_field}` list must contain exactly one message.",
330
+ message=f"The dataset is malformed, the first element of `{key}` must have a 'role' field on line {idx + 1}.",
219
331
  line_number=idx + 1,
220
332
  error_source="key_value",
221
333
  )
222
- if "role" not in example[output_field][0]:
334
+
335
+ if example[key][0]["role"] != "assistant":
223
336
  raise InvalidFileFormatError(
224
- message=f"The dataset is malformed, the `{output_field}` message is missing the `role` field.",
337
+ message=f"The dataset is malformed, the first element of `{key}` must have the 'assistant' role on line {idx + 1}.",
225
338
  line_number=idx + 1,
226
339
  error_source="key_value",
227
340
  )
228
- elif example[output_field][0]["role"] != "assistant":
341
+
342
+ if "content" not in example[key][0]:
229
343
  raise InvalidFileFormatError(
230
- message=f"The dataset is malformed, the `{output_field}` must contain an assistant message.",
344
+ message=f"The dataset is malformed, the first element of `{key}` must have a 'content' field on line {idx + 1}.",
231
345
  line_number=idx + 1,
232
346
  error_source="key_value",
233
347
  )
234
348
 
235
- validate_messages(example["preferred_output"], idx)
236
- validate_messages(example["non_preferred_output"], idx)
349
+ if not isinstance(example[key][0]["content"], str):
350
+ raise InvalidFileFormatError(
351
+ message=f"The dataset is malformed, the 'content' field in `{key}` must be a string on line {idx + 1}.",
352
+ line_number=idx + 1,
353
+ error_source="key_value",
354
+ )
237
355
 
238
356
 
239
357
  def _check_utf8(file: Path) -> Dict[str, Any]:
@@ -410,7 +528,12 @@ def _check_jsonl(file: Path, purpose: FilePurpose | str) -> Dict[str, Any]:
410
528
  message_column = JSONL_REQUIRED_COLUMNS_MAP[
411
529
  DatasetFormat.CONVERSATION
412
530
  ][0]
413
- validate_messages(json_line[message_column], idx)
531
+ require_assistant = purpose != FilePurpose.Eval
532
+ validate_messages(
533
+ json_line[message_column],
534
+ idx,
535
+ require_assistant_role=require_assistant,
536
+ )
414
537
  else:
415
538
  for column in JSONL_REQUIRED_COLUMNS_MAP[current_format]:
416
539
  if not isinstance(json_line[column], str):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: together
3
- Version: 1.5.28
3
+ Version: 1.5.30
4
4
  Summary: Python client for Together's Cloud Platform!
5
5
  License: Apache-2.0
6
6
  License-File: LICENSE
@@ -17,6 +17,7 @@ Classifier: Programming Language :: Python :: 3.13
17
17
  Classifier: Programming Language :: Python :: 3.14
18
18
  Provides-Extra: pyarrow
19
19
  Requires-Dist: aiohttp (>=3.9.3,<4.0.0)
20
+ Requires-Dist: black (>=25.9.0,<26.0.0)
20
21
  Requires-Dist: click (>=8.1.7,<9.0.0)
21
22
  Requires-Dist: eval-type-backport (>=0.1.3,<0.3.0)
22
23
  Requires-Dist: filelock (>=3.13.1,<4.0.0)
@@ -1,11 +1,11 @@
1
1
  together/__init__.py,sha256=B8T7ybZ7D6jJNRTuFDVjOFlImCNag8tNZXpZdXz7xNM,1530
2
2
  together/abstract/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
- together/abstract/api_requestor.py,sha256=kKVxkJqpd1CQ4t9Ky4kngkvlzZh1xoDN0PBAM8mGW_Q,25948
3
+ together/abstract/api_requestor.py,sha256=CPFsQXEqIoXDcqxlDQyumbTMtGmL7CQYtSYrkb3binU,27556
4
4
  together/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
5
  together/cli/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
6
  together/cli/api/chat.py,sha256=2PHRb-9T-lUEKhUJFtc7SxJv3shCVx40gq_8pzfsewM,9234
7
7
  together/cli/api/completions.py,sha256=l-Zw5t7hojL3w8xd_mitS2NRB72i5Z0xwkzH0rT5XMc,4263
8
- together/cli/api/endpoints.py,sha256=f6KafWZvRF6n_ThWdr3y9uhE6wPF37PcD45w_EtgXmY,13289
8
+ together/cli/api/endpoints.py,sha256=ShQQuMRwg70bclEqplk2aru_IlwOdp4DEuLZ1kG1KvA,14622
9
9
  together/cli/api/evaluation.py,sha256=36SsujC5qicf-8l8GA8wqRtEC8NKzsAjL-_nYhePpQM,14691
10
10
  together/cli/api/files.py,sha256=QLYEXRkY8J2Gg1SbTCtzGfoTMvosoeACNK83L_oLubs,3397
11
11
  together/cli/api/finetune.py,sha256=zG8Peg7DuptMpT5coqqGbRdaxM5SxQgte9tIv7tMJbM,18437
@@ -26,28 +26,29 @@ together/legacy/finetune.py,sha256=nL2Ytt8FOVtGbcMumnn1gyf4aEFrRok8GolWJJaHQAg,5
26
26
  together/legacy/images.py,sha256=bJJRs-6C7-NexPyaeyHiYlHOU51yls5-QAiqtO4xrZU,626
27
27
  together/legacy/models.py,sha256=85ZN9Ids_FjdYNDRv5k7sgrtVWPKPHqkDplORtVUGHg,1087
28
28
  together/resources/__init__.py,sha256=iOo8bNF8J7EKgShrEZSWpeULTgyHnuxmRrwmVHyGy4Y,1280
29
- together/resources/audio/__init__.py,sha256=S6gV6aEPAHL9kskoA38Uq_Ju7uM1Xcfl0doO-DtQLbo,1185
30
- together/resources/audio/speech.py,sha256=81ib_gIo-Rxoaipx2Pi9ZsKnOTjeFPwSlBrcUkyX5xk,5211
31
- together/resources/audio/transcriptions.py,sha256=dtKRVqrmJsTufAxbDMvTzco7E3iW5qbP3oEvcjGhvUY,10883
29
+ together/resources/audio/__init__.py,sha256=S8moxi0iEOw3NZMtXN0TPDP37k1q9tNZx-qH4SV72hQ,1439
30
+ together/resources/audio/speech.py,sha256=3lVxJPQM1bbStkAJWym3eJua-AxuYSP9jBZy0jLls_M,5446
31
+ together/resources/audio/transcriptions.py,sha256=99EF-Kyt-oySF887U4Wtzg49jTf6L_ln8AZVhvrl1HA,12377
32
32
  together/resources/audio/translations.py,sha256=_2VeYEthYzPIflDD_hlVmoXk-OCgLgnvva2vMPpaU_Q,10508
33
+ together/resources/audio/voices.py,sha256=Xyjv_jI5hFTvRouiryT0m4pre9_SoZOa8r5agVmoFSU,1699
33
34
  together/resources/batch.py,sha256=dBXgh264AQPsO3pCff1vT1PAewnX9yroxa8UZQUJAqE,4584
34
35
  together/resources/chat/__init__.py,sha256=RsTptdP8MeGjcdIjze896-J27cRvCbUoMft0X2BVlQ8,617
35
36
  together/resources/chat/completions.py,sha256=cBsSFWi9qToQCn4V_3qJ0gwRqORjF6NFDXmHcHfIhOY,14442
36
37
  together/resources/code_interpreter.py,sha256=vbN8Mh5MG6HQvqra7p61leIyfebgbgJTM_q2A_Fylhw,2948
37
38
  together/resources/completions.py,sha256=5Wa-ZjPCxRcam6CDe7KgGYlTA7yJZMmd5TrRgGCL_ug,11726
38
39
  together/resources/embeddings.py,sha256=PTvLb82yjG_-iQOyuhsilp77Fr7gZ0o6WD2KeRnKoxs,2675
39
- together/resources/endpoints.py,sha256=_UoNSknG7aK0jOcxBPcq21KYpWsNFCfNePUWGMOUjmc,17173
40
+ together/resources/endpoints.py,sha256=BP75wUEcOtpiUbfLAQH5GX2RL8_RnM522-D8Iz7_LUU,20378
40
41
  together/resources/evaluation.py,sha256=eYSs9HUpW51XZjX-yNlFZlLapsuEDINJ8BjxJoYa4U0,31443
41
42
  together/resources/files.py,sha256=_uK5xzriXNOGNw3tQGuTbCaxBRo6Az6_cXOUtBNFzDk,5434
42
43
  together/resources/finetune.py,sha256=VeMyPG-PA16d2UAzqNTQEAKBgMvVApj97lTAHEuR0kc,44890
43
- together/resources/images.py,sha256=LQUjKPaFxWTqOAPnyF1Pp7Rz4NLOYhmoKwshpYiprEM,4923
44
+ together/resources/images.py,sha256=FHXkcnzyj2JLw4YF1NH56hgISEeCO0Sg_SvTCcTJaOo,4831
44
45
  together/resources/models.py,sha256=WpP-x25AXYpmu-VKu_X4Up-zHwpWBBvPRpbV4FsWQrU,8266
45
46
  together/resources/rerank.py,sha256=3Ju_aRSyZ1s_3zCSNZnSnEJErUVmt2xa3M8z1nvejMA,3931
46
47
  together/resources/videos.py,sha256=Dn7vslH1pZVw4WYvH-69fjzqLZdKHkTK-lIbFkxh0w0,11144
47
48
  together/together_response.py,sha256=a3dgKMPDrlfKQwxYENfNt2T4l2vSZxRWMixhHSy-q3E,1308
48
- together/types/__init__.py,sha256=QTGi5Y7MAQv9ik1gFwReFZ3tN7-M-mYo_J3M41-L2FU,4258
49
+ together/types/__init__.py,sha256=eK8DXMzHp78kieDv7JpXNbcS2k3aWvyQrgLdYUtL_qM,4342
49
50
  together/types/abstract.py,sha256=1lFQI_3WjsR_t1128AeKW0aTk6EiM6Gh1J3ZuyLLPao,642
50
- together/types/audio_speech.py,sha256=iGFUawdzPoRjLFatkSl5S-liAzhJaye6raZwaSdUrSg,5012
51
+ together/types/audio_speech.py,sha256=pUzqpx7NCjtPIq91xO2k0psetzLz29NTHHm6DS0k8Xg,9682
51
52
  together/types/batch.py,sha256=KiI5i1En7cyIUxHhVIGoQk6Wlw19c0PXSqDWwc2KZ2c,1140
52
53
  together/types/chat_completions.py,sha256=NxJ7tFlWynxoLsRtQHzM7Ka3QxKVjRs6EvtOTYZ79bM,5340
53
54
  together/types/code_interpreter.py,sha256=cjF8TKgRkJllHS4i24dWQZBGTRsG557eHSewOiip0Kk,1770
@@ -59,18 +60,18 @@ together/types/error.py,sha256=OVlCs3cx_2WhZK4JzHT8SQyRIIqKOP1AZQ4y1PydjAE,370
59
60
  together/types/evaluation.py,sha256=9gCAgzAwFD95MWnSgvxnSYFF27wKOTqIGn-wSOpFt2M,2385
60
61
  together/types/files.py,sha256=XCimmKDaSEEfavOtp0UH-ZrRxrmHoCTYLlmmhshbr7A,1994
61
62
  together/types/finetune.py,sha256=EQAJVXqK1Ne2V2dCfUiJgOwK9_x_7TwQRrjWavap698,11396
62
- together/types/images.py,sha256=xnC-FZGdZU30WSFTybfGneWxb-kj0ZGufJsgHtB8j0k,980
63
+ together/types/images.py,sha256=IsrmIM2FVeG-kP4vhZUx5fG5EhOJ-d8fefrAmOVKNDs,926
63
64
  together/types/models.py,sha256=V8bcy1c3uTmqwnTVphbYLF2AJ6l2P2724njl36TzfHQ,2878
64
65
  together/types/rerank.py,sha256=qZfuXOn7MZ6ly8hpJ_MZ7OU_Bi1-cgYNSB20Wja8Qkk,1061
65
66
  together/types/videos.py,sha256=KCLk8CF0kbA_51qnHOzAWg5VA6HTlwnY-sTZ2lUR0Eo,1861
66
67
  together/utils/__init__.py,sha256=5fqvj4KT2rHxKSQot2TSyV_HcvkvkGiqAiaYuJwqtm0,786
67
68
  together/utils/_log.py,sha256=5IYNI-jYzxyIS-pUvhb0vE_Muo3MA7GgBhsu66TKP2w,1951
68
69
  together/utils/api_helpers.py,sha256=2K0O6qeEQ2zVFvi5NBN5m2kjZJaS3-JfKFecQ7SmGaw,3746
69
- together/utils/files.py,sha256=Zqw1MA0CbnpkiGWSMk0DtRUFzf7-kWDE1OgzGWinbV4,20671
70
+ together/utils/files.py,sha256=oFmQZZHud6sMlT1OCUMx2Ab6t7ScBcZ72em0KQ75BJI,24879
70
71
  together/utils/tools.py,sha256=H2MTJhEqtBllaDvOyZehIO_IVNK3P17rSDeILtJIVag,2964
71
72
  together/version.py,sha256=p03ivHyE0SyWU4jAnRTBi_sOwywVWoZPU4g2gzRgG-Y,126
72
- together-1.5.28.dist-info/METADATA,sha256=N3_xshiZVte8P8a_tm_e6qENYytCn9RpwuQvpC4mTZg,16543
73
- together-1.5.28.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
74
- together-1.5.28.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
75
- together-1.5.28.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
76
- together-1.5.28.dist-info/RECORD,,
73
+ together-1.5.30.dist-info/METADATA,sha256=w7u0mFGUl4wpYgwCXtfeEk6A6_ArlRvju65HRPOyAD4,16583
74
+ together-1.5.30.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
75
+ together-1.5.30.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
76
+ together-1.5.30.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
77
+ together-1.5.30.dist-info/RECORD,,