together 1.5.35__py3-none-any.whl → 2.0.0a6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- together/__init__.py +101 -114
- together/_base_client.py +1995 -0
- together/_client.py +1033 -0
- together/_compat.py +219 -0
- together/_constants.py +14 -0
- together/_exceptions.py +108 -0
- together/_files.py +123 -0
- together/_models.py +857 -0
- together/_qs.py +150 -0
- together/_resource.py +43 -0
- together/_response.py +830 -0
- together/_streaming.py +370 -0
- together/_types.py +260 -0
- together/_utils/__init__.py +64 -0
- together/_utils/_compat.py +45 -0
- together/_utils/_datetime_parse.py +136 -0
- together/_utils/_logs.py +25 -0
- together/_utils/_proxy.py +65 -0
- together/_utils/_reflection.py +42 -0
- together/_utils/_resources_proxy.py +24 -0
- together/_utils/_streams.py +12 -0
- together/_utils/_sync.py +58 -0
- together/_utils/_transform.py +457 -0
- together/_utils/_typing.py +156 -0
- together/_utils/_utils.py +421 -0
- together/_version.py +4 -0
- together/lib/.keep +4 -0
- together/lib/__init__.py +23 -0
- together/{cli → lib/cli}/api/endpoints.py +66 -84
- together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
- together/{cli → lib/cli}/api/files.py +20 -17
- together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
- together/{cli → lib/cli}/api/models.py +34 -27
- together/lib/cli/api/utils.py +50 -0
- together/{cli → lib/cli}/cli.py +16 -26
- together/{constants.py → lib/constants.py} +11 -24
- together/lib/resources/__init__.py +11 -0
- together/lib/resources/files.py +999 -0
- together/lib/resources/fine_tuning.py +280 -0
- together/lib/resources/models.py +35 -0
- together/lib/types/__init__.py +13 -0
- together/lib/types/error.py +9 -0
- together/lib/types/fine_tuning.py +397 -0
- together/{utils → lib/utils}/__init__.py +6 -14
- together/{utils → lib/utils}/_log.py +11 -16
- together/{utils → lib/utils}/files.py +90 -288
- together/lib/utils/serializer.py +10 -0
- together/{utils → lib/utils}/tools.py +19 -55
- together/resources/__init__.py +225 -39
- together/resources/audio/__init__.py +72 -48
- together/resources/audio/audio.py +198 -0
- together/resources/audio/speech.py +574 -128
- together/resources/audio/transcriptions.py +247 -261
- together/resources/audio/translations.py +221 -241
- together/resources/audio/voices.py +111 -41
- together/resources/batches.py +417 -0
- together/resources/chat/__init__.py +30 -21
- together/resources/chat/chat.py +102 -0
- together/resources/chat/completions.py +1063 -263
- together/resources/code_interpreter/__init__.py +33 -0
- together/resources/code_interpreter/code_interpreter.py +258 -0
- together/resources/code_interpreter/sessions.py +135 -0
- together/resources/completions.py +884 -225
- together/resources/embeddings.py +172 -68
- together/resources/endpoints.py +589 -490
- together/resources/evals.py +452 -0
- together/resources/files.py +397 -129
- together/resources/fine_tuning.py +1033 -0
- together/resources/hardware.py +181 -0
- together/resources/images.py +258 -104
- together/resources/jobs.py +214 -0
- together/resources/models.py +223 -193
- together/resources/rerank.py +190 -92
- together/resources/videos.py +286 -214
- together/types/__init__.py +66 -167
- together/types/audio/__init__.py +10 -0
- together/types/audio/speech_create_params.py +75 -0
- together/types/audio/transcription_create_params.py +54 -0
- together/types/audio/transcription_create_response.py +111 -0
- together/types/audio/translation_create_params.py +40 -0
- together/types/audio/translation_create_response.py +70 -0
- together/types/audio/voice_list_response.py +23 -0
- together/types/audio_speech_stream_chunk.py +16 -0
- together/types/autoscaling.py +13 -0
- together/types/autoscaling_param.py +15 -0
- together/types/batch_create_params.py +24 -0
- together/types/batch_create_response.py +14 -0
- together/types/batch_job.py +45 -0
- together/types/batch_list_response.py +10 -0
- together/types/chat/__init__.py +18 -0
- together/types/chat/chat_completion.py +60 -0
- together/types/chat/chat_completion_chunk.py +61 -0
- together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
- together/types/chat/chat_completion_structured_message_text_param.py +13 -0
- together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
- together/types/chat/chat_completion_usage.py +13 -0
- together/types/chat/chat_completion_warning.py +9 -0
- together/types/chat/completion_create_params.py +329 -0
- together/types/code_interpreter/__init__.py +5 -0
- together/types/code_interpreter/session_list_response.py +31 -0
- together/types/code_interpreter_execute_params.py +45 -0
- together/types/completion.py +42 -0
- together/types/completion_chunk.py +66 -0
- together/types/completion_create_params.py +138 -0
- together/types/dedicated_endpoint.py +44 -0
- together/types/embedding.py +24 -0
- together/types/embedding_create_params.py +31 -0
- together/types/endpoint_create_params.py +43 -0
- together/types/endpoint_list_avzones_response.py +11 -0
- together/types/endpoint_list_params.py +18 -0
- together/types/endpoint_list_response.py +41 -0
- together/types/endpoint_update_params.py +27 -0
- together/types/eval_create_params.py +263 -0
- together/types/eval_create_response.py +16 -0
- together/types/eval_list_params.py +21 -0
- together/types/eval_list_response.py +10 -0
- together/types/eval_status_response.py +100 -0
- together/types/evaluation_job.py +139 -0
- together/types/execute_response.py +108 -0
- together/types/file_delete_response.py +13 -0
- together/types/file_list.py +12 -0
- together/types/file_purpose.py +9 -0
- together/types/file_response.py +31 -0
- together/types/file_type.py +7 -0
- together/types/fine_tuning_cancel_response.py +194 -0
- together/types/fine_tuning_content_params.py +24 -0
- together/types/fine_tuning_delete_params.py +11 -0
- together/types/fine_tuning_delete_response.py +12 -0
- together/types/fine_tuning_list_checkpoints_response.py +21 -0
- together/types/fine_tuning_list_events_response.py +12 -0
- together/types/fine_tuning_list_response.py +199 -0
- together/types/finetune_event.py +41 -0
- together/types/finetune_event_type.py +33 -0
- together/types/finetune_response.py +177 -0
- together/types/hardware_list_params.py +16 -0
- together/types/hardware_list_response.py +58 -0
- together/types/image_data_b64.py +15 -0
- together/types/image_data_url.py +15 -0
- together/types/image_file.py +23 -0
- together/types/image_generate_params.py +85 -0
- together/types/job_list_response.py +47 -0
- together/types/job_retrieve_response.py +43 -0
- together/types/log_probs.py +18 -0
- together/types/model_list_response.py +10 -0
- together/types/model_object.py +42 -0
- together/types/model_upload_params.py +36 -0
- together/types/model_upload_response.py +23 -0
- together/types/rerank_create_params.py +36 -0
- together/types/rerank_create_response.py +36 -0
- together/types/tool_choice.py +23 -0
- together/types/tool_choice_param.py +23 -0
- together/types/tools_param.py +23 -0
- together/types/training_method_dpo.py +22 -0
- together/types/training_method_sft.py +18 -0
- together/types/video_create_params.py +86 -0
- together/types/video_create_response.py +10 -0
- together/types/video_job.py +57 -0
- together-2.0.0a6.dist-info/METADATA +729 -0
- together-2.0.0a6.dist-info/RECORD +165 -0
- {together-1.5.35.dist-info → together-2.0.0a6.dist-info}/WHEEL +1 -1
- together-2.0.0a6.dist-info/entry_points.txt +2 -0
- {together-1.5.35.dist-info → together-2.0.0a6.dist-info}/licenses/LICENSE +1 -1
- together/abstract/api_requestor.py +0 -770
- together/cli/api/chat.py +0 -298
- together/cli/api/completions.py +0 -119
- together/cli/api/images.py +0 -93
- together/cli/api/utils.py +0 -139
- together/client.py +0 -186
- together/error.py +0 -194
- together/filemanager.py +0 -635
- together/legacy/__init__.py +0 -0
- together/legacy/base.py +0 -27
- together/legacy/complete.py +0 -93
- together/legacy/embeddings.py +0 -27
- together/legacy/files.py +0 -146
- together/legacy/finetune.py +0 -177
- together/legacy/images.py +0 -27
- together/legacy/models.py +0 -44
- together/resources/batch.py +0 -165
- together/resources/code_interpreter.py +0 -82
- together/resources/evaluation.py +0 -808
- together/resources/finetune.py +0 -1388
- together/together_response.py +0 -50
- together/types/abstract.py +0 -26
- together/types/audio_speech.py +0 -311
- together/types/batch.py +0 -54
- together/types/chat_completions.py +0 -210
- together/types/code_interpreter.py +0 -57
- together/types/common.py +0 -67
- together/types/completions.py +0 -107
- together/types/embeddings.py +0 -35
- together/types/endpoints.py +0 -123
- together/types/error.py +0 -16
- together/types/evaluation.py +0 -93
- together/types/files.py +0 -93
- together/types/finetune.py +0 -465
- together/types/images.py +0 -42
- together/types/models.py +0 -96
- together/types/rerank.py +0 -43
- together/types/videos.py +0 -69
- together/utils/api_helpers.py +0 -124
- together/version.py +0 -6
- together-1.5.35.dist-info/METADATA +0 -583
- together-1.5.35.dist-info/RECORD +0 -77
- together-1.5.35.dist-info/entry_points.txt +0 -3
- /together/{abstract → lib/cli}/__init__.py +0 -0
- /together/{cli → lib/cli/api}/__init__.py +0 -0
- /together/{cli/api/__init__.py → py.typed} +0 -0
together/together_response.py
DELETED
@@ -1,50 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict
-
-
-class TogetherResponse:
-    """
-    API Response class. Stores headers and response data.
-    """
-
-    def __init__(self, data: Any, headers: Dict[str, Any]):
-        self._headers = headers
-        self.data = data
-
-    @property
-    def request_id(self) -> str | None:
-        """
-        Fetches request id from headers
-        """
-        if "cf-ray" in self._headers:
-            return str(self._headers["cf-ray"])
-        return None
-
-    @property
-    def requests_remaining(self) -> int | None:
-        """
-        Number of requests remaining at current rate limit
-        """
-        if "x-ratelimit-remaining" in self._headers:
-            return int(self._headers["x-ratelimit-remaining"])
-        return None
-
-    @property
-    def processed_by(self) -> str | None:
-        """
-        Processing host server name
-        """
-        if "x-hostname" in self._headers:
-            return str(self._headers["x-hostname"])
-        return None
-
-    @property
-    def response_ms(self) -> int | None:
-        """
-        Server request completion time
-        """
-        if "x-total-time" in self._headers:
-            h = self._headers["x-total-time"]
-            return None if h is None else round(float(h))
-        return None
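The removed wrapper stored the raw payload together with the HTTP headers and exposed request metadata as properties keyed on well-known headers. A minimal sketch of how it was used in 1.5.35 (the header values below are invented for illustration):

```python
# Illustrative only: how the removed TogetherResponse surfaced header metadata
# in 1.5.35. The header values here are made up for the example.
from together.together_response import TogetherResponse

resp = TogetherResponse(
    data={"id": "example"},              # payload as returned by the API requestor
    headers={
        "cf-ray": "8f2a1c0d9e",          # -> request_id
        "x-ratelimit-remaining": "99",   # -> requests_remaining
        "x-hostname": "api-worker-1",    # -> processed_by
        "x-total-time": "412.7",         # -> response_ms (rounded to 413)
    },
)
print(resp.request_id, resp.requests_remaining, resp.processed_by, resp.response_ms)
```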
together/types/abstract.py
DELETED
@@ -1,26 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Dict
-
-import pydantic
-from pydantic import ConfigDict
-from typing_extensions import ClassVar
-
-from together.constants import BASE_URL, MAX_RETRIES, TIMEOUT_SECS
-
-
-PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
-
-
-@dataclass
-class TogetherClient:
-    api_key: str | None = None
-    base_url: str | None = BASE_URL
-    timeout: float | None = TIMEOUT_SECS
-    max_retries: int | None = MAX_RETRIES
-    supplied_headers: Dict[str, str] | None = None
-
-
-class BaseModel(pydantic.BaseModel):
-    model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow")
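The shared base model above was configured with `extra="allow"`, so unknown response fields were kept instead of raising validation errors. A minimal sketch of that behavior using the pydantic v2 API (the `Example` model is hypothetical, defined only for this demonstration):

```python
# Minimal sketch: unknown keys are retained by the 1.5.35 base model (extra="allow").
# The Example model is hypothetical and exists only for this demonstration.
from together.types.abstract import BaseModel


class Example(BaseModel):
    text: str


obj = Example.model_validate({"text": "ok", "brand_new_field": 1})
assert obj.text == "ok"
assert obj.model_extra == {"brand_new_field": 1}  # extra keys are kept, not rejected
```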
together/types/audio_speech.py
DELETED
@@ -1,311 +0,0 @@
-from __future__ import annotations
-
-import base64
-from enum import Enum
-from re import S
-from typing import BinaryIO, Dict, Iterator, List, Optional, Union
-
-from pydantic import BaseModel, ConfigDict
-
-from together.together_response import TogetherResponse
-
-
-class AudioResponseFormat(str, Enum):
-    MP3 = "mp3"
-    WAV = "wav"
-    RAW = "raw"
-
-
-class AudioLanguage(str, Enum):
-    EN = "en"
-    DE = "de"
-    FR = "fr"
-    ES = "es"
-    HI = "hi"
-    IT = "it"
-    JA = "ja"
-    KO = "ko"
-    NL = "nl"
-    PL = "pl"
-    PT = "pt"
-    RU = "ru"
-    SV = "sv"
-    TR = "tr"
-    ZH = "zh"
-
-
-class AudioResponseEncoding(str, Enum):
-    PCM_F32LE = "pcm_f32le"
-    PCM_S16LE = "pcm_s16le"
-    PCM_MULAW = "pcm_mulaw"
-    PCM_ALAW = "pcm_alaw"
-
-
-class AudioObjectType(str, Enum):
-    AUDIO_TTS_CHUNK = "audio.tts.chunk"
-
-
-class StreamSentinelType(str, Enum):
-    DONE = "[DONE]"
-
-
-class AudioSpeechRequest(BaseModel):
-    model: str
-    input: str
-    voice: str | None = None
-    response_format: AudioResponseFormat = AudioResponseFormat.MP3
-    language: AudioLanguage = AudioLanguage.EN
-    response_encoding: AudioResponseEncoding = AudioResponseEncoding.PCM_F32LE
-    sample_rate: int = 44100
-    stream: bool = False
-
-
-class AudioSpeechStreamChunk(BaseModel):
-    object: AudioObjectType = AudioObjectType.AUDIO_TTS_CHUNK
-    model: str
-    b64: str
-
-
-class AudioSpeechStreamEvent(BaseModel):
-    data: AudioSpeechStreamChunk
-
-
-class StreamSentinel(BaseModel):
-    data: StreamSentinelType = StreamSentinelType.DONE
-
-
-class AudioSpeechStreamEventResponse(BaseModel):
-    response: AudioSpeechStreamEvent | StreamSentinel
-
-
-class AudioSpeechStreamResponse(BaseModel):
-    response: TogetherResponse | Iterator[TogetherResponse]
-
-    model_config = ConfigDict(arbitrary_types_allowed=True)
-
-    def stream_to_file(
-        self, file_path: str, response_format: AudioResponseFormat | str | None = None
-    ) -> None:
-        """
-        Save the audio response to a file.
-
-        For non-streaming responses, writes the complete file as received.
-        For streaming responses, collects binary chunks and constructs a valid
-        file format based on the response_format parameter.
-
-        Args:
-            file_path: Path where the audio file should be saved.
-            response_format: Format of the audio (wav, mp3, or raw). If not provided,
-                will attempt to infer from file extension or default to wav.
-        """
-        # Determine response format
-        if response_format is None:
-            # Infer from file extension
-            ext = file_path.lower().split(".")[-1] if "." in file_path else ""
-            if ext in ["wav"]:
-                response_format = AudioResponseFormat.WAV
-            elif ext in ["mp3", "mpeg"]:
-                response_format = AudioResponseFormat.MP3
-            elif ext in ["raw", "pcm"]:
-                response_format = AudioResponseFormat.RAW
-            else:
-                # Default to WAV if unknown
-                response_format = AudioResponseFormat.WAV
-
-        if isinstance(response_format, str):
-            response_format = AudioResponseFormat(response_format)
-
-        if isinstance(self.response, TogetherResponse):
-            # Non-streaming: save complete file
-            with open(file_path, "wb") as f:
-                f.write(self.response.data)
-
-        elif isinstance(self.response, Iterator):
-            # Streaming: collect binary chunks
-            audio_chunks = []
-            for chunk in self.response:
-                if isinstance(chunk.data, bytes):
-                    audio_chunks.append(chunk.data)
-                elif isinstance(chunk.data, dict):
-                    # SSE format with JSON/base64
-                    try:
-                        stream_event = AudioSpeechStreamEventResponse(
-                            response={"data": chunk.data}
-                        )
-                        if isinstance(stream_event.response, StreamSentinel):
-                            break
-                        audio_chunks.append(
-                            base64.b64decode(stream_event.response.data.b64)
-                        )
-                    except Exception:
-                        continue  # Skip malformed chunks
-
-            if not audio_chunks:
-                raise ValueError("No audio data received in streaming response")
-
-            # Concatenate all chunks
-            audio_data = b"".join(audio_chunks)
-
-            with open(file_path, "wb") as f:
-                if response_format == AudioResponseFormat.WAV:
-                    if audio_data.startswith(b"RIFF"):
-                        # Already a valid WAV file
-                        f.write(audio_data)
-                    else:
-                        # Raw PCM - add WAV header
-                        self._write_wav_header(f, audio_data)
-                elif response_format == AudioResponseFormat.MP3:
-                    # MP3 format: Check if data is actually MP3 or raw PCM
-                    # MP3 files start with ID3 tag or sync word (0xFF 0xFB/0xFA/0xF3/0xF2)
-                    is_mp3 = audio_data.startswith(b"ID3") or (
-                        len(audio_data) > 0
-                        and audio_data[0:1] == b"\xff"
-                        and len(audio_data) > 1
-                        and audio_data[1] & 0xE0 == 0xE0
-                    )
-
-                    if is_mp3:
-                        f.write(audio_data)
-                    else:
-                        raise ValueError("Invalid MP3 data received.")
-                else:
-                    # RAW format: write PCM data as-is
-                    f.write(audio_data)
-
-    @staticmethod
-    def _write_wav_header(file_handle: BinaryIO, audio_data: bytes) -> None:
-        """
-        Write WAV file header for raw PCM audio data.
-
-        Uses default TTS parameters: 16-bit PCM, mono, 24000 Hz sample rate.
-        """
-        import struct
-
-        sample_rate = 24000
-        num_channels = 1
-        bits_per_sample = 16
-        byte_rate = sample_rate * num_channels * bits_per_sample // 8
-        block_align = num_channels * bits_per_sample // 8
-        data_size = len(audio_data)
-
-        # Write WAV header
-        file_handle.write(b"RIFF")
-        file_handle.write(struct.pack("<I", 36 + data_size))  # File size - 8
-        file_handle.write(b"WAVE")
-        file_handle.write(b"fmt ")
-        file_handle.write(struct.pack("<I", 16))  # fmt chunk size
-        file_handle.write(struct.pack("<H", 1))  # Audio format (1 = PCM)
-        file_handle.write(struct.pack("<H", num_channels))
-        file_handle.write(struct.pack("<I", sample_rate))
-        file_handle.write(struct.pack("<I", byte_rate))
-        file_handle.write(struct.pack("<H", block_align))
-        file_handle.write(struct.pack("<H", bits_per_sample))
-        file_handle.write(b"data")
-        file_handle.write(struct.pack("<I", data_size))
-        file_handle.write(audio_data)
-
-
-class AudioTranscriptionResponseFormat(str, Enum):
-    JSON = "json"
-    VERBOSE_JSON = "verbose_json"
-
-
-class AudioTimestampGranularities(str, Enum):
-    SEGMENT = "segment"
-    WORD = "word"
-
-
-class AudioTranscriptionRequest(BaseModel):
-    model_config = ConfigDict(arbitrary_types_allowed=True)
-
-    file: Union[str, BinaryIO]
-    model: str = "openai/whisper-large-v3"
-    language: Optional[str] = None
-    prompt: Optional[str] = None
-    response_format: AudioTranscriptionResponseFormat = (
-        AudioTranscriptionResponseFormat.JSON
-    )
-    temperature: float = 0.0
-    timestamp_granularities: Optional[AudioTimestampGranularities] = (
-        AudioTimestampGranularities.SEGMENT
-    )
-
-
-class AudioTranslationRequest(BaseModel):
-    model_config = ConfigDict(arbitrary_types_allowed=True)
-
-    file: Union[str, BinaryIO]
-    model: str = "openai/whisper-large-v3"
-    language: Optional[str] = None
-    prompt: Optional[str] = None
-    response_format: AudioTranscriptionResponseFormat = (
-        AudioTranscriptionResponseFormat.JSON
-    )
-    temperature: float = 0.0
-    timestamp_granularities: Optional[AudioTimestampGranularities] = (
-        AudioTimestampGranularities.SEGMENT
-    )
-
-
-class AudioTranscriptionSegment(BaseModel):
-    id: int
-    start: float
-    end: float
-    text: str
-
-
-class AudioTranscriptionWord(BaseModel):
-    word: str
-    start: float
-    end: float
-    id: Optional[int] = None
-    speaker_id: Optional[str] = None
-
-
-class AudioSpeakerSegment(BaseModel):
-    id: int
-    speaker_id: str
-    start: float
-    end: float
-    text: str
-    words: List[AudioTranscriptionWord]
-
-
-class AudioTranscriptionResponse(BaseModel):
-    text: str
-
-
-class AudioTranscriptionVerboseResponse(BaseModel):
-    id: Optional[str] = None
-    language: Optional[str] = None
-    duration: Optional[float] = None
-    text: str
-    segments: Optional[List[AudioTranscriptionSegment]] = None
-    words: Optional[List[AudioTranscriptionWord]] = None
-    speaker_segments: Optional[List[AudioSpeakerSegment]] = None
-
-
-class AudioTranslationResponse(BaseModel):
-    text: str
-
-
-class AudioTranslationVerboseResponse(BaseModel):
-    task: Optional[str] = None
-    language: Optional[str] = None
-    duration: Optional[float] = None
-    text: str
-    segments: Optional[List[AudioTranscriptionSegment]] = None
-    words: Optional[List[AudioTranscriptionWord]] = None
-
-
-class ModelVoices(BaseModel):
-    """Represents a model with its available voices."""
-
-    model: str
-    voices: List[Dict[str, str]]  # Each voice is a dict with 'name' key
-
-
-class VoiceListResponse(BaseModel):
-    """Response containing a list of models and their available voices."""
-
-    data: List[ModelVoices]
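Note that the removed `_write_wav_header` helper hard-codes 16-bit mono 24 kHz PCM regardless of the request's `sample_rate`, and the byte-rate and block-align fields follow directly from those constants (`byte_rate = sample_rate * channels * bits / 8`). For reference, a standalone sketch of the same 44-byte RIFF/WAVE header layout:

```python
# Standalone sketch of the header layout the removed helper wrote.
# Assumes the same fixed parameters: PCM (format 1), mono, 16-bit, 24 kHz.
import struct


def wav_header(data_size: int, sample_rate: int = 24000, channels: int = 1, bits: int = 16) -> bytes:
    byte_rate = sample_rate * channels * bits // 8  # audio bytes per second
    block_align = channels * bits // 8              # bytes per sample frame
    return (
        b"RIFF" + struct.pack("<I", 36 + data_size) + b"WAVE"
        + b"fmt " + struct.pack("<IHHIIHH", 16, 1, channels, sample_rate, byte_rate, block_align, bits)
        + b"data" + struct.pack("<I", data_size)
    )


header = wav_header(data_size=48_000)  # one second of 16-bit mono 24 kHz PCM
assert len(header) == 44               # standard PCM WAV header size
```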
together/types/batch.py
DELETED
@@ -1,54 +0,0 @@
-from __future__ import annotations
-
-from enum import Enum
-from typing import Optional
-from datetime import datetime
-
-from pydantic import Field
-
-from together.types.abstract import BaseModel
-
-
-class BatchJobStatus(str, Enum):
-    """
-    The status of a batch job
-    """
-
-    VALIDATING = "VALIDATING"
-    IN_PROGRESS = "IN_PROGRESS"
-    COMPLETED = "COMPLETED"
-    FAILED = "FAILED"
-    EXPIRED = "EXPIRED"
-    CANCELLED = "CANCELLED"
-    CANCELING = "CANCELING"
-
-
-class BatchEndpoint(str, Enum):
-    """
-    The endpoint of a batch job
-    """
-
-    COMPLETIONS = "/v1/completions"
-    CHAT_COMPLETIONS = "/v1/chat/completions"
-    # More endpoints can be added here as needed
-
-
-class BatchJob(BaseModel):
-    """
-    A batch job object
-    """
-
-    id: str
-    user_id: str
-    input_file_id: str
-    file_size_bytes: int
-    status: BatchJobStatus
-    job_deadline: datetime
-    created_at: datetime
-    endpoint: str
-    progress: float = 0.0
-    model_id: Optional[str] = None
-    output_file_id: Optional[str] = None
-    error_file_id: Optional[str] = None
-    error: Optional[str] = None
-    completed_at: Optional[datetime] = None
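Because `BatchJob` extended the `extra="allow"` base model shown earlier, a 1.5.35-era payload validated directly with pydantic v2. A hedged sketch (the values are invented; only the field names come from the deleted model above):

```python
# Hedged sketch: validating a 1.5.35-era BatchJob payload with pydantic v2.
# The values are invented; only the field names come from the deleted model above.
from together.types.batch import BatchJob, BatchJobStatus

job = BatchJob.model_validate(
    {
        "id": "batch-abc123",
        "user_id": "user-1",
        "input_file_id": "file-input",
        "file_size_bytes": 1024,
        "status": "IN_PROGRESS",
        "job_deadline": "2025-01-02T00:00:00Z",
        "created_at": "2025-01-01T00:00:00Z",
        "endpoint": "/v1/chat/completions",
    }
)
assert job.status is BatchJobStatus.IN_PROGRESS
assert job.progress == 0.0  # omitted optional fields fall back to their defaults
```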
together/types/chat_completions.py
DELETED

@@ -1,210 +0,0 @@
-from __future__ import annotations
-
-import warnings
-from enum import Enum
-from typing import Any, Dict, List
-
-from pydantic import model_validator
-from typing_extensions import Self
-
-from together.types.abstract import BaseModel
-from together.types.common import (
-    DeltaContent,
-    FinishReason,
-    LogprobsPart,
-    ObjectType,
-    PromptPart,
-    UsageData,
-)
-
-
-class MessageRole(str, Enum):
-    ASSISTANT = "assistant"
-    SYSTEM = "system"
-    USER = "user"
-    TOOL = "tool"
-
-
-class ResponseFormatType(str, Enum):
-    JSON_OBJECT = "json_object"
-    JSON_SCHEMA = "json_schema"
-    REGEX = "regex"
-
-
-class FunctionCall(BaseModel):
-    name: str | None = None
-    arguments: str | None = None
-
-
-class ToolCalls(BaseModel):
-    id: str | None = None
-    type: str | None = None
-    function: FunctionCall | None = None
-
-
-class ChatCompletionMessageContentType(str, Enum):
-    TEXT = "text"
-    IMAGE_URL = "image_url"
-    VIDEO_URL = "video_url"
-    AUDIO_URL = "audio_url"
-
-
-class ChatCompletionMessageContentImageURL(BaseModel):
-    url: str
-
-
-class ChatCompletionMessageContentVideoURL(BaseModel):
-    url: str
-
-
-class ChatCompletionMessageContentAudioURL(BaseModel):
-    url: str
-
-
-class ChatCompletionMessageContent(BaseModel):
-    type: ChatCompletionMessageContentType
-    text: str | None = None
-    image_url: ChatCompletionMessageContentImageURL | None = None
-    video_url: ChatCompletionMessageContentVideoURL | None = None
-    audio_url: ChatCompletionMessageContentAudioURL | None = None
-
-
-class ChatCompletionMessage(BaseModel):
-    role: MessageRole
-    content: str | List[ChatCompletionMessageContent] | None = None
-    tool_calls: List[ToolCalls] | None = None
-
-
-class ResponseFormat(BaseModel):
-    type: ResponseFormatType
-    schema_: Dict[str, Any] | None = None
-    pattern: str | None = None
-
-    def to_dict(self) -> Dict[str, Any]:
-        result: Dict[str, Any] = {"type": self.type.value}
-        if self.schema_ is not None:
-            result["schema"] = self.schema_
-        if self.pattern is not None:
-            result["pattern"] = self.pattern
-        return result
-
-
-class FunctionTool(BaseModel):
-    description: str | None = None
-    name: str
-    parameters: Dict[str, Any] | None = None
-
-
-class FunctionToolChoice(BaseModel):
-    name: str
-
-
-class Tools(BaseModel):
-    type: str
-    function: FunctionTool
-
-
-class ToolChoice(BaseModel):
-    type: str
-    function: FunctionToolChoice
-
-
-class ToolChoiceEnum(str, Enum):
-    Auto = "auto"
-    Required = "required"
-
-
-class ChatCompletionRequest(BaseModel):
-    # list of messages
-    messages: List[ChatCompletionMessage]
-    # model name
-    model: str
-    # stopping criteria: max tokens to generate
-    max_tokens: int | None = None
-    # stopping criteria: list of strings to stop generation
-    stop: List[str] | None = None
-    # sampling hyperparameters
-    temperature: float | None = None
-    top_p: float | None = None
-    top_k: int | None = None
-    repetition_penalty: float | None = None
-    presence_penalty: float | None = None
-    frequency_penalty: float | None = None
-    min_p: float | None = None
-    logit_bias: Dict[str, float] | None = None
-    seed: int | None = None
-    # stream SSE token chunks
-    stream: bool = False
-    # return logprobs
-    logprobs: int | None = None
-    # echo prompt.
-    # can be used with logprobs to return prompt logprobs
-    echo: bool | None = None
-    # number of output generations
-    n: int | None = None
-    # moderation model
-    safety_model: str | None = None
-    # constraints
-    response_format: ResponseFormat | None = None
-    tools: List[Tools] | None = None
-    tool_choice: ToolChoice | ToolChoiceEnum | None = None
-
-    # Raise warning if repetition_penalty is used with presence_penalty or frequency_penalty
-    @model_validator(mode="after")
-    def verify_parameters(self) -> Self:
-        if self.repetition_penalty:
-            if self.presence_penalty or self.frequency_penalty:
-                warnings.warn(
-                    "repetition_penalty is not advisable to be used alongside presence_penalty or frequency_penalty"
-                )
-        return self
-
-
-class ChatCompletionChoicesData(BaseModel):
-    index: int | None = None
-    logprobs: LogprobsPart | None = None
-    seed: int | None = None
-    finish_reason: FinishReason | None = None
-    message: ChatCompletionMessage | None = None
-
-
-class ChatCompletionResponse(BaseModel):
-    # request id
-    id: str | None = None
-    # object type
-    object: ObjectType | None = None
-    # created timestamp
-    created: int | None = None
-    # model name
-    model: str | None = None
-    # choices list
-    choices: List[ChatCompletionChoicesData] | None = None
-    # prompt list
-    prompt: List[PromptPart] | List[None] | None = None
-    # token usage data
-    usage: UsageData | None = None
-
-
-class ChatCompletionChoicesChunk(BaseModel):
-    index: int | None = None
-    logprobs: float | None = None
-    seed: int | None = None
-    finish_reason: FinishReason | None = None
-    delta: DeltaContent | None = None
-
-
-class ChatCompletionChunk(BaseModel):
-    # request id
-    id: str | None = None
-    # object type
-    object: ObjectType | None = None
-    # created timestamp
-    created: int | None = None
-    # model name
-    model: str | None = None
-    # delta content
-    choices: List[ChatCompletionChoicesChunk] | None = None
-    # finish reason
-    finish_reason: FinishReason | None = None
-    # token usage data
-    usage: UsageData | None = None
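The only behavior in this module beyond plain data modeling was the `verify_parameters` validator, which warned when `repetition_penalty` was combined with `presence_penalty` or `frequency_penalty`. A hedged sketch of that behavior (model name and parameter values are illustrative only):

```python
# Hedged sketch: the deleted validator warns when repetition_penalty is combined
# with presence_penalty. Model name and parameter values are illustrative only.
import warnings

from together.types.chat_completions import (
    ChatCompletionMessage,
    ChatCompletionRequest,
    MessageRole,
)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    ChatCompletionRequest(
        model="example-org/example-chat-model",
        messages=[ChatCompletionMessage(role=MessageRole.USER, content="hello")],
        repetition_penalty=1.2,
        presence_penalty=0.5,
    )

assert any("repetition_penalty" in str(w.message) for w in caught)
```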
together/types/code_interpreter.py
DELETED

@@ -1,57 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, Literal, Union
-
-from pydantic import Field
-
-from together.types.endpoints import TogetherJSONModel
-
-
-class FileInput(TogetherJSONModel):
-    """File input to be uploaded to the code interpreter session."""
-
-    name: str = Field(description="The name of the file.")
-    encoding: Literal["string", "base64"] = Field(
-        description="Encoding of the file content. Use 'string' for text files and 'base64' for binary files."
-    )
-    content: str = Field(description="The content of the file, encoded as specified.")
-
-
-class InterpreterOutput(TogetherJSONModel):
-    """Base class for interpreter output types."""
-
-    type: Literal["stdout", "stderr", "error", "display_data", "execute_result"] = (
-        Field(description="The type of output")
-    )
-    data: Union[str, Dict[str, Any]] = Field(description="The output data")
-
-
-class ExecuteResponseData(TogetherJSONModel):
-    """Data from code execution response."""
-
-    outputs: list[InterpreterOutput] = Field(
-        description="List of outputs from execution", default_factory=list
-    )
-    errors: Union[str, None] = Field(
-        description="Any errors that occurred during execution", default=None
-    )
-    session_id: str = Field(
-        description="Identifier of the current session. Used to make follow-up calls."
-    )
-    status: str = Field(description="Status of the execution", default="completed")
-
-
-class ExecuteResponse(TogetherJSONModel):
-    """Response from code execution."""
-
-    data: ExecuteResponseData = Field(
-        description="The response data containing outputs and session information"
-    )
-
-
-__all__ = [
-    "FileInput",
-    "InterpreterOutput",
-    "ExecuteResponseData",
-    "ExecuteResponse",
-]
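For reference, the deleted response models nested interpreter outputs under `data`. Assuming `TogetherJSONModel` (defined in the also-removed `together/types/endpoints.py`, not shown here) is a pydantic model, a payload would have validated along these lines (payload invented):

```python
# Hedged sketch: shape of the removed code-interpreter response models.
# Assumes TogetherJSONModel is a pydantic model; the payload below is invented.
from together.types.code_interpreter import ExecuteResponse

resp = ExecuteResponse.model_validate(
    {
        "data": {
            "session_id": "ses_example",
            "status": "completed",
            "outputs": [{"type": "stdout", "data": "hello\n"}],
        }
    }
)
for output in resp.data.outputs:
    print(output.type, output.data)  # -> stdout hello
```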