together 1.5.17__py3-none-any.whl → 2.0.0a8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- together/__init__.py +101 -63
- together/_base_client.py +1995 -0
- together/_client.py +1033 -0
- together/_compat.py +219 -0
- together/_constants.py +14 -0
- together/_exceptions.py +108 -0
- together/_files.py +123 -0
- together/_models.py +857 -0
- together/_qs.py +150 -0
- together/_resource.py +43 -0
- together/_response.py +830 -0
- together/_streaming.py +370 -0
- together/_types.py +260 -0
- together/_utils/__init__.py +64 -0
- together/_utils/_compat.py +45 -0
- together/_utils/_datetime_parse.py +136 -0
- together/_utils/_logs.py +25 -0
- together/_utils/_proxy.py +65 -0
- together/_utils/_reflection.py +42 -0
- together/_utils/_resources_proxy.py +24 -0
- together/_utils/_streams.py +12 -0
- together/_utils/_sync.py +58 -0
- together/_utils/_transform.py +457 -0
- together/_utils/_typing.py +156 -0
- together/_utils/_utils.py +421 -0
- together/_version.py +4 -0
- together/lib/.keep +4 -0
- together/lib/__init__.py +23 -0
- together/{cli → lib/cli}/api/endpoints.py +108 -75
- together/lib/cli/api/evals.py +588 -0
- together/{cli → lib/cli}/api/files.py +20 -17
- together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +161 -120
- together/lib/cli/api/models.py +140 -0
- together/{cli → lib/cli}/api/utils.py +6 -7
- together/{cli → lib/cli}/cli.py +16 -24
- together/{constants.py → lib/constants.py} +17 -12
- together/lib/resources/__init__.py +11 -0
- together/lib/resources/files.py +999 -0
- together/lib/resources/fine_tuning.py +280 -0
- together/lib/resources/models.py +35 -0
- together/lib/types/__init__.py +13 -0
- together/lib/types/error.py +9 -0
- together/lib/types/fine_tuning.py +455 -0
- together/{utils → lib/utils}/__init__.py +6 -14
- together/{utils → lib/utils}/_log.py +11 -16
- together/lib/utils/files.py +628 -0
- together/lib/utils/serializer.py +10 -0
- together/{utils → lib/utils}/tools.py +19 -55
- together/resources/__init__.py +225 -33
- together/resources/audio/__init__.py +72 -21
- together/resources/audio/audio.py +198 -0
- together/resources/audio/speech.py +574 -122
- together/resources/audio/transcriptions.py +282 -0
- together/resources/audio/translations.py +256 -0
- together/resources/audio/voices.py +135 -0
- together/resources/batches.py +417 -0
- together/resources/chat/__init__.py +30 -21
- together/resources/chat/chat.py +102 -0
- together/resources/chat/completions.py +1063 -263
- together/resources/code_interpreter/__init__.py +33 -0
- together/resources/code_interpreter/code_interpreter.py +258 -0
- together/resources/code_interpreter/sessions.py +135 -0
- together/resources/completions.py +884 -225
- together/resources/embeddings.py +172 -68
- together/resources/endpoints.py +598 -395
- together/resources/evals.py +452 -0
- together/resources/files.py +398 -121
- together/resources/fine_tuning.py +1033 -0
- together/resources/hardware.py +181 -0
- together/resources/images.py +256 -108
- together/resources/jobs.py +214 -0
- together/resources/models.py +238 -90
- together/resources/rerank.py +190 -92
- together/resources/videos.py +374 -0
- together/types/__init__.py +65 -109
- together/types/audio/__init__.py +10 -0
- together/types/audio/speech_create_params.py +75 -0
- together/types/audio/transcription_create_params.py +54 -0
- together/types/audio/transcription_create_response.py +111 -0
- together/types/audio/translation_create_params.py +40 -0
- together/types/audio/translation_create_response.py +70 -0
- together/types/audio/voice_list_response.py +23 -0
- together/types/audio_speech_stream_chunk.py +16 -0
- together/types/autoscaling.py +13 -0
- together/types/autoscaling_param.py +15 -0
- together/types/batch_create_params.py +24 -0
- together/types/batch_create_response.py +14 -0
- together/types/batch_job.py +45 -0
- together/types/batch_list_response.py +10 -0
- together/types/chat/__init__.py +18 -0
- together/types/chat/chat_completion.py +60 -0
- together/types/chat/chat_completion_chunk.py +61 -0
- together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
- together/types/chat/chat_completion_structured_message_text_param.py +13 -0
- together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
- together/types/chat/chat_completion_usage.py +13 -0
- together/types/chat/chat_completion_warning.py +9 -0
- together/types/chat/completion_create_params.py +329 -0
- together/types/code_interpreter/__init__.py +5 -0
- together/types/code_interpreter/session_list_response.py +31 -0
- together/types/code_interpreter_execute_params.py +45 -0
- together/types/completion.py +42 -0
- together/types/completion_chunk.py +66 -0
- together/types/completion_create_params.py +138 -0
- together/types/dedicated_endpoint.py +44 -0
- together/types/embedding.py +24 -0
- together/types/embedding_create_params.py +31 -0
- together/types/endpoint_create_params.py +43 -0
- together/types/endpoint_list_avzones_response.py +11 -0
- together/types/endpoint_list_params.py +18 -0
- together/types/endpoint_list_response.py +41 -0
- together/types/endpoint_update_params.py +27 -0
- together/types/eval_create_params.py +263 -0
- together/types/eval_create_response.py +16 -0
- together/types/eval_list_params.py +21 -0
- together/types/eval_list_response.py +10 -0
- together/types/eval_status_response.py +100 -0
- together/types/evaluation_job.py +139 -0
- together/types/execute_response.py +108 -0
- together/types/file_delete_response.py +13 -0
- together/types/file_list.py +12 -0
- together/types/file_purpose.py +9 -0
- together/types/file_response.py +31 -0
- together/types/file_type.py +7 -0
- together/types/fine_tuning_cancel_response.py +194 -0
- together/types/fine_tuning_content_params.py +24 -0
- together/types/fine_tuning_delete_params.py +11 -0
- together/types/fine_tuning_delete_response.py +12 -0
- together/types/fine_tuning_list_checkpoints_response.py +21 -0
- together/types/fine_tuning_list_events_response.py +12 -0
- together/types/fine_tuning_list_response.py +199 -0
- together/types/finetune_event.py +41 -0
- together/types/finetune_event_type.py +33 -0
- together/types/finetune_response.py +177 -0
- together/types/hardware_list_params.py +16 -0
- together/types/hardware_list_response.py +58 -0
- together/types/image_data_b64.py +15 -0
- together/types/image_data_url.py +15 -0
- together/types/image_file.py +23 -0
- together/types/image_generate_params.py +85 -0
- together/types/job_list_response.py +47 -0
- together/types/job_retrieve_response.py +43 -0
- together/types/log_probs.py +18 -0
- together/types/model_list_response.py +10 -0
- together/types/model_object.py +42 -0
- together/types/model_upload_params.py +36 -0
- together/types/model_upload_response.py +23 -0
- together/types/rerank_create_params.py +36 -0
- together/types/rerank_create_response.py +36 -0
- together/types/tool_choice.py +23 -0
- together/types/tool_choice_param.py +23 -0
- together/types/tools_param.py +23 -0
- together/types/training_method_dpo.py +22 -0
- together/types/training_method_sft.py +18 -0
- together/types/video_create_params.py +86 -0
- together/types/video_job.py +57 -0
- together-2.0.0a8.dist-info/METADATA +680 -0
- together-2.0.0a8.dist-info/RECORD +164 -0
- {together-1.5.17.dist-info → together-2.0.0a8.dist-info}/WHEEL +1 -1
- together-2.0.0a8.dist-info/entry_points.txt +2 -0
- {together-1.5.17.dist-info → together-2.0.0a8.dist-info/licenses}/LICENSE +1 -1
- together/abstract/api_requestor.py +0 -729
- together/cli/api/chat.py +0 -276
- together/cli/api/completions.py +0 -119
- together/cli/api/images.py +0 -93
- together/cli/api/models.py +0 -55
- together/client.py +0 -176
- together/error.py +0 -194
- together/filemanager.py +0 -389
- together/legacy/__init__.py +0 -0
- together/legacy/base.py +0 -27
- together/legacy/complete.py +0 -93
- together/legacy/embeddings.py +0 -27
- together/legacy/files.py +0 -146
- together/legacy/finetune.py +0 -177
- together/legacy/images.py +0 -27
- together/legacy/models.py +0 -44
- together/resources/batch.py +0 -136
- together/resources/code_interpreter.py +0 -82
- together/resources/finetune.py +0 -1064
- together/together_response.py +0 -50
- together/types/abstract.py +0 -26
- together/types/audio_speech.py +0 -110
- together/types/batch.py +0 -53
- together/types/chat_completions.py +0 -197
- together/types/code_interpreter.py +0 -57
- together/types/common.py +0 -66
- together/types/completions.py +0 -107
- together/types/embeddings.py +0 -35
- together/types/endpoints.py +0 -123
- together/types/error.py +0 -16
- together/types/files.py +0 -90
- together/types/finetune.py +0 -398
- together/types/images.py +0 -44
- together/types/models.py +0 -45
- together/types/rerank.py +0 -43
- together/utils/api_helpers.py +0 -124
- together/utils/files.py +0 -425
- together/version.py +0 -6
- together-1.5.17.dist-info/METADATA +0 -525
- together-1.5.17.dist-info/RECORD +0 -69
- together-1.5.17.dist-info/entry_points.txt +0 -3
- /together/{abstract → lib/cli}/__init__.py +0 -0
- /together/{cli → lib/cli/api}/__init__.py +0 -0
- /together/{cli/api/__init__.py → py.typed} +0 -0
together/together_response.py
DELETED
@@ -1,50 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict
-
-
-class TogetherResponse:
-    """
-    API Response class. Stores headers and response data.
-    """
-
-    def __init__(self, data: Any, headers: Dict[str, Any]):
-        self._headers = headers
-        self.data = data
-
-    @property
-    def request_id(self) -> str | None:
-        """
-        Fetches request id from headers
-        """
-        if "cf-ray" in self._headers:
-            return str(self._headers["cf-ray"])
-        return None
-
-    @property
-    def requests_remaining(self) -> int | None:
-        """
-        Number of requests remaining at current rate limit
-        """
-        if "x-ratelimit-remaining" in self._headers:
-            return int(self._headers["x-ratelimit-remaining"])
-        return None
-
-    @property
-    def processed_by(self) -> str | None:
-        """
-        Processing host server name
-        """
-        if "x-hostname" in self._headers:
-            return str(self._headers["x-hostname"])
-        return None
-
-    @property
-    def response_ms(self) -> int | None:
-        """
-        Server request completion time
-        """
-        if "x-total-time" in self._headers:
-            h = self._headers["x-total-time"]
-            return None if h is None else round(float(h))
-        return None
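
For reference, a minimal sketch of how the deleted `TogetherResponse` wrapper was exercised in 1.5.17, based only on the class shown above; the header values are invented for illustration:

```python
# Illustrative only: reading response metadata via the removed TogetherResponse class (1.5.17).
from together.together_response import TogetherResponse

resp = TogetherResponse(
    data=b"...raw response bytes...",
    headers={"cf-ray": "8a1b2c3d4e5f", "x-ratelimit-remaining": "99", "x-total-time": "123.4"},
)
print(resp.request_id)          # "8a1b2c3d4e5f" (taken from the cf-ray header)
print(resp.requests_remaining)  # 99
print(resp.response_ms)         # 123 (x-total-time rounded to an int)
```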

together/types/abstract.py
DELETED
@@ -1,26 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from typing import Dict
-
-import pydantic
-from pydantic import ConfigDict
-from typing_extensions import ClassVar
-
-from together.constants import BASE_URL, MAX_RETRIES, TIMEOUT_SECS
-
-
-PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
-
-
-@dataclass
-class TogetherClient:
-    api_key: str | None = None
-    base_url: str | None = BASE_URL
-    timeout: float | None = TIMEOUT_SECS
-    max_retries: int | None = MAX_RETRIES
-    supplied_headers: Dict[str, str] | None = None
-
-
-class BaseModel(pydantic.BaseModel):
-    model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow")

together/types/audio_speech.py
DELETED
@@ -1,110 +0,0 @@
-from __future__ import annotations
-
-from enum import Enum
-from typing import Iterator
-import threading
-
-from pydantic import BaseModel, ConfigDict
-
-from together.together_response import TogetherResponse
-import base64
-
-
-class AudioResponseFormat(str, Enum):
-    MP3 = "mp3"
-    WAV = "wav"
-    RAW = "raw"
-
-
-class AudioLanguage(str, Enum):
-    EN = "en"
-    DE = "de"
-    FR = "fr"
-    ES = "es"
-    HI = "hi"
-    IT = "it"
-    JA = "ja"
-    KO = "ko"
-    NL = "nl"
-    PL = "pl"
-    PT = "pt"
-    RU = "ru"
-    SV = "sv"
-    TR = "tr"
-    ZH = "zh"
-
-
-class AudioResponseEncoding(str, Enum):
-    PCM_F32LE = "pcm_f32le"
-    PCM_S16LE = "pcm_s16le"
-    PCM_MULAW = "pcm_mulaw"
-    PCM_ALAW = "pcm_alaw"
-
-
-class AudioObjectType(str, Enum):
-    AUDIO_TTS_CHUNK = "audio.tts.chunk"
-
-
-class StreamSentinelType(str, Enum):
-    DONE = "[DONE]"
-
-
-class AudioSpeechRequest(BaseModel):
-    model: str
-    input: str
-    voice: str | None = None
-    response_format: AudioResponseFormat = AudioResponseFormat.MP3
-    language: AudioLanguage = AudioLanguage.EN
-    response_encoding: AudioResponseEncoding = AudioResponseEncoding.PCM_F32LE
-    sample_rate: int = 44100
-    stream: bool = False
-
-
-class AudioSpeechStreamChunk(BaseModel):
-    object: AudioObjectType = AudioObjectType.AUDIO_TTS_CHUNK
-    model: str
-    b64: str
-
-
-class AudioSpeechStreamEvent(BaseModel):
-    data: AudioSpeechStreamChunk
-
-
-class StreamSentinel(BaseModel):
-    data: StreamSentinelType = StreamSentinelType.DONE
-
-
-class AudioSpeechStreamEventResponse(BaseModel):
-    response: AudioSpeechStreamEvent | StreamSentinel
-
-
-class AudioSpeechStreamResponse(BaseModel):
-
-    response: TogetherResponse | Iterator[TogetherResponse]
-
-    model_config = ConfigDict(arbitrary_types_allowed=True)
-
-    def stream_to_file(self, file_path: str) -> None:
-
-        if isinstance(self.response, TogetherResponse):
-            # save response to file
-            with open(file_path, "wb") as f:
-                f.write(self.response.data)
-
-        elif isinstance(self.response, Iterator):
-
-            with open(file_path, "wb") as f:
-                for chunk in self.response:
-
-                    # Try to parse as stream chunk
-                    stream_event_response = AudioSpeechStreamEventResponse(
-                        response={"data": chunk.data}
-                    )
-
-                    if isinstance(stream_event_response.response, StreamSentinel):
-                        break
-
-                    # decode base64
-                    audio = base64.b64decode(stream_event_response.response.data.b64)
-
-                    f.write(audio)
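
As a reading aid, a small sketch of the request defaults and the non-streaming branch of `stream_to_file` shown above; the model name and byte payload are placeholders, and this assumes the pydantic v2 path that the 1.5.17 code targets:

```python
# Illustrative only: exercising the removed audio speech models (1.5.17).
from together.together_response import TogetherResponse
from together.types.audio_speech import AudioSpeechRequest, AudioSpeechStreamResponse

# Request model with its declared defaults (mp3, English, pcm_f32le, 44100 Hz).
request = AudioSpeechRequest(model="placeholder-tts-model", input="Hello, world")
print(request.response_format, request.sample_rate)  # AudioResponseFormat.MP3 44100

# Non-streaming case: the wrapper simply writes the raw response bytes to disk.
response = AudioSpeechStreamResponse(
    response=TogetherResponse(data=b"<binary audio payload>", headers={})
)
response.stream_to_file("speech.mp3")
```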

together/types/batch.py
DELETED
@@ -1,53 +0,0 @@
-from __future__ import annotations
-
-from enum import Enum
-from typing import Optional
-from datetime import datetime
-
-from pydantic import Field
-
-from together.types.abstract import BaseModel
-
-
-class BatchJobStatus(str, Enum):
-    """
-    The status of a batch job
-    """
-
-    VALIDATING = "VALIDATING"
-    IN_PROGRESS = "IN_PROGRESS"
-    COMPLETED = "COMPLETED"
-    FAILED = "FAILED"
-    EXPIRED = "EXPIRED"
-    CANCELLED = "CANCELLED"
-
-
-class BatchEndpoint(str, Enum):
-    """
-    The endpoint of a batch job
-    """
-
-    COMPLETIONS = "/v1/completions"
-    CHAT_COMPLETIONS = "/v1/chat/completions"
-    # More endpoints can be added here as needed
-
-
-class BatchJob(BaseModel):
-    """
-    A batch job object
-    """
-
-    id: str
-    user_id: str
-    input_file_id: str
-    file_size_bytes: int
-    status: BatchJobStatus
-    job_deadline: datetime
-    created_at: datetime
-    endpoint: str
-    progress: float = 0.0
-    model_id: Optional[str] = None
-    output_file_id: Optional[str] = None
-    error_file_id: Optional[str] = None
-    error: Optional[str] = None
-    completed_at: Optional[datetime] = None
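
A brief sketch of validating a payload against the deleted `BatchJob` model above; every field value here is invented:

```python
# Illustrative only: parsing a payload with the removed BatchJob model (1.5.17).
from together.types.batch import BatchJob, BatchJobStatus

job = BatchJob(
    id="batch_123",
    user_id="user_456",
    input_file_id="file_789",
    file_size_bytes=1024,
    status="IN_PROGRESS",                  # coerced to BatchJobStatus.IN_PROGRESS
    job_deadline="2025-01-01T00:00:00Z",   # ISO strings are parsed into datetime
    created_at="2024-12-31T00:00:00Z",
    endpoint="/v1/chat/completions",
)
assert job.status is BatchJobStatus.IN_PROGRESS
assert job.progress == 0.0  # default value
```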

together/types/chat_completions.py
DELETED
@@ -1,197 +0,0 @@
-from __future__ import annotations
-
-import warnings
-from enum import Enum
-from typing import Any, Dict, List
-
-from pydantic import model_validator
-from typing_extensions import Self
-
-from together.types.abstract import BaseModel
-from together.types.common import (
-    DeltaContent,
-    FinishReason,
-    LogprobsPart,
-    ObjectType,
-    PromptPart,
-    UsageData,
-)
-
-
-class MessageRole(str, Enum):
-    ASSISTANT = "assistant"
-    SYSTEM = "system"
-    USER = "user"
-    TOOL = "tool"
-
-
-class ResponseFormatType(str, Enum):
-    JSON_OBJECT = "json_object"
-    JSON_SCHEMA = "json_schema"
-
-
-class FunctionCall(BaseModel):
-    name: str | None = None
-    arguments: str | None = None
-
-
-class ToolCalls(BaseModel):
-    id: str | None = None
-    type: str | None = None
-    function: FunctionCall | None = None
-
-
-class ChatCompletionMessageContentType(str, Enum):
-    TEXT = "text"
-    IMAGE_URL = "image_url"
-    VIDEO_URL = "video_url"
-
-
-class ChatCompletionMessageContentImageURL(BaseModel):
-    url: str
-
-
-class ChatCompletionMessageContentVideoURL(BaseModel):
-    url: str
-
-
-class ChatCompletionMessageContent(BaseModel):
-    type: ChatCompletionMessageContentType
-    text: str | None = None
-    image_url: ChatCompletionMessageContentImageURL | None = None
-    video_url: ChatCompletionMessageContentVideoURL | None = None
-
-
-class ChatCompletionMessage(BaseModel):
-    role: MessageRole
-    content: str | List[ChatCompletionMessageContent] | None = None
-    tool_calls: List[ToolCalls] | None = None
-
-
-class ResponseFormat(BaseModel):
-    type: ResponseFormatType
-    schema_: Dict[str, Any] | None = None
-
-    def to_dict(self) -> Dict[str, Any]:
-        return {"schema": self.schema_, "type": self.type}
-
-
-class FunctionTool(BaseModel):
-    description: str | None = None
-    name: str
-    parameters: Dict[str, Any] | None = None
-
-
-class FunctionToolChoice(BaseModel):
-    name: str
-
-
-class Tools(BaseModel):
-    type: str
-    function: FunctionTool
-
-
-class ToolChoice(BaseModel):
-    type: str
-    function: FunctionToolChoice
-
-
-class ToolChoiceEnum(str, Enum):
-    Auto = "auto"
-    Required = "required"
-
-
-class ChatCompletionRequest(BaseModel):
-    # list of messages
-    messages: List[ChatCompletionMessage]
-    # model name
-    model: str
-    # stopping criteria: max tokens to generate
-    max_tokens: int | None = None
-    # stopping criteria: list of strings to stop generation
-    stop: List[str] | None = None
-    # sampling hyperparameters
-    temperature: float | None = None
-    top_p: float | None = None
-    top_k: int | None = None
-    repetition_penalty: float | None = None
-    presence_penalty: float | None = None
-    frequency_penalty: float | None = None
-    min_p: float | None = None
-    logit_bias: Dict[str, float] | None = None
-    seed: int | None = None
-    # stream SSE token chunks
-    stream: bool = False
-    # return logprobs
-    logprobs: int | None = None
-    # echo prompt.
-    # can be used with logprobs to return prompt logprobs
-    echo: bool | None = None
-    # number of output generations
-    n: int | None = None
-    # moderation model
-    safety_model: str | None = None
-    # constraints
-    response_format: ResponseFormat | None = None
-    tools: List[Tools] | None = None
-    tool_choice: ToolChoice | ToolChoiceEnum | None = None
-
-    # Raise warning if repetition_penalty is used with presence_penalty or frequency_penalty
-    @model_validator(mode="after")
-    def verify_parameters(self) -> Self:
-        if self.repetition_penalty:
-            if self.presence_penalty or self.frequency_penalty:
-                warnings.warn(
-                    "repetition_penalty is not advisable to be used alongside presence_penalty or frequency_penalty"
-                )
-        return self
-
-
-class ChatCompletionChoicesData(BaseModel):
-    index: int | None = None
-    logprobs: LogprobsPart | None = None
-    seed: int | None = None
-    finish_reason: FinishReason | None = None
-    message: ChatCompletionMessage | None = None
-
-
-class ChatCompletionResponse(BaseModel):
-    # request id
-    id: str | None = None
-    # object type
-    object: ObjectType | None = None
-    # created timestamp
-    created: int | None = None
-    # model name
-    model: str | None = None
-    # choices list
-    choices: List[ChatCompletionChoicesData] | None = None
-    # prompt list
-    prompt: List[PromptPart] | List[None] | None = None
-    # token usage data
-    usage: UsageData | None = None
-
-
-class ChatCompletionChoicesChunk(BaseModel):
-    index: int | None = None
-    logprobs: float | None = None
-    seed: int | None = None
-    finish_reason: FinishReason | None = None
-    delta: DeltaContent | None = None
-
-
-class ChatCompletionChunk(BaseModel):
-    # request id
-    id: str | None = None
-    # object type
-    object: ObjectType | None = None
-    # created timestamp
-    created: int | None = None
-    # model name
-    model: str | None = None
-    # delta content
-    choices: List[ChatCompletionChoicesChunk] | None = None
-    # finish reason
-    finish_reason: FinishReason | None = None
-    # token usage data
-    usage: UsageData | None = None
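
To illustrate the `verify_parameters` validator above, a minimal sketch of the warning it emitted when `repetition_penalty` was combined with `presence_penalty`; the model name and penalty values are placeholders:

```python
# Illustrative only: the ChatCompletionRequest validator removed above (1.5.17)
# warns when repetition_penalty is combined with presence/frequency penalties.
import warnings

from together.types.chat_completions import (
    ChatCompletionMessage,
    ChatCompletionRequest,
    MessageRole,
)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    ChatCompletionRequest(
        model="placeholder-model",
        messages=[ChatCompletionMessage(role=MessageRole.USER, content="Hello")],
        repetition_penalty=1.2,
        presence_penalty=0.5,
    )

assert any("repetition_penalty" in str(w.message) for w in caught)
```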

together/types/code_interpreter.py
DELETED
@@ -1,57 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, Literal, Union
-
-from pydantic import Field
-
-from together.types.endpoints import TogetherJSONModel
-
-
-class FileInput(TogetherJSONModel):
-    """File input to be uploaded to the code interpreter session."""
-
-    name: str = Field(description="The name of the file.")
-    encoding: Literal["string", "base64"] = Field(
-        description="Encoding of the file content. Use 'string' for text files and 'base64' for binary files."
-    )
-    content: str = Field(description="The content of the file, encoded as specified.")
-
-
-class InterpreterOutput(TogetherJSONModel):
-    """Base class for interpreter output types."""
-
-    type: Literal["stdout", "stderr", "error", "display_data", "execute_result"] = (
-        Field(description="The type of output")
-    )
-    data: Union[str, Dict[str, Any]] = Field(description="The output data")
-
-
-class ExecuteResponseData(TogetherJSONModel):
-    """Data from code execution response."""
-
-    outputs: list[InterpreterOutput] = Field(
-        description="List of outputs from execution", default_factory=list
-    )
-    errors: Union[str, None] = Field(
-        description="Any errors that occurred during execution", default=None
-    )
-    session_id: str = Field(
-        description="Identifier of the current session. Used to make follow-up calls."
-    )
-    status: str = Field(description="Status of the execution", default="completed")
-
-
-class ExecuteResponse(TogetherJSONModel):
-    """Response from code execution."""
-
-    data: ExecuteResponseData = Field(
-        description="The response data containing outputs and session information"
-    )
-
-
-__all__ = [
-    "FileInput",
-    "InterpreterOutput",
-    "ExecuteResponseData",
-    "ExecuteResponse",
-]

together/types/common.py
DELETED
@@ -1,66 +0,0 @@
-from __future__ import annotations
-
-from enum import Enum
-from typing import Any, Dict, List
-
-from pydantic import ConfigDict
-from tqdm.utils import CallbackIOWrapper
-
-from together.types.abstract import BaseModel
-
-
-# Generation finish reason
-class FinishReason(str, Enum):
-    Length = "length"
-    StopSequence = "stop"
-    EOS = "eos"
-    ToolCalls = "tool_calls"
-    Error = "error"
-    Null = ""
-
-
-class UsageData(BaseModel):
-    prompt_tokens: int
-    completion_tokens: int
-    total_tokens: int
-
-
-class ObjectType(str, Enum):
-    Completion = "text.completion"
-    CompletionChunk = "completion.chunk"
-    ChatCompletion = "chat.completion"
-    ChatCompletionChunk = "chat.completion.chunk"
-    Embedding = "embedding"
-    FinetuneEvent = "fine-tune-event"
-    File = "file"
-    Model = "model"
-
-
-class LogprobsPart(BaseModel):
-    # token list
-    tokens: List[str | None] | None = None
-    # token logprob list
-    token_logprobs: List[float | None] | None = None
-
-
-class PromptPart(BaseModel):
-    # prompt string
-    text: str | None = None
-    # list of prompt logprobs
-    logprobs: LogprobsPart | None = None
-
-
-class DeltaContent(BaseModel):
-    content: str | None = None
-
-
-class TogetherRequest(BaseModel):
-    model_config = ConfigDict(arbitrary_types_allowed=True)
-
-    method: str
-    url: str
-    headers: Dict[str, str] | None = None
-    params: Dict[str, Any] | CallbackIOWrapper | None = None
-    files: Dict[str, Any] | None = None
-    allow_redirects: bool = True
-    override_headers: bool = False

together/types/completions.py
DELETED
@@ -1,107 +0,0 @@
-from __future__ import annotations
-
-import warnings
-from typing import Dict, List
-
-from pydantic import model_validator
-from typing_extensions import Self
-
-from together.types.abstract import BaseModel
-from together.types.common import (
-    DeltaContent,
-    FinishReason,
-    LogprobsPart,
-    ObjectType,
-    PromptPart,
-    UsageData,
-)
-
-
-class CompletionRequest(BaseModel):
-    # prompt to complete
-    prompt: str
-    # query model
-    model: str
-    # stopping criteria: max tokens to generate
-    max_tokens: int | None = None
-    # stopping criteria: list of strings to stop generation
-    stop: List[str] | None = None
-    # sampling hyperparameters
-    temperature: float | None = None
-    top_p: float | None = None
-    top_k: int | None = None
-    repetition_penalty: float | None = None
-    presence_penalty: float | None = None
-    frequency_penalty: float | None = None
-    min_p: float | None = None
-    logit_bias: Dict[str, float] | None = None
-    seed: int | None = None
-    # stream SSE token chunks
-    stream: bool = False
-    # return logprobs
-    logprobs: int | None = None
-    # echo prompt.
-    # can be used with logprobs to return prompt logprobs
-    echo: bool | None = None
-    # number of output generations
-    n: int | None = None
-    # moderation model
-    safety_model: str | None = None
-
-    # Raise warning if repetition_penalty is used with presence_penalty or frequency_penalty
-    @model_validator(mode="after")
-    def verify_parameters(self) -> Self:
-        if self.repetition_penalty:
-            if self.presence_penalty or self.frequency_penalty:
-                warnings.warn(
-                    "repetition_penalty is not advisable to be used alongside presence_penalty or frequency_penalty"
-                )
-        return self
-
-
-class CompletionChoicesData(BaseModel):
-    index: int
-    logprobs: LogprobsPart | None = None
-    seed: int | None = None
-    finish_reason: FinishReason
-    text: str
-
-
-class CompletionChoicesChunk(BaseModel):
-    index: int | None = None
-    logprobs: float | None = None
-    seed: int | None = None
-    finish_reason: FinishReason | None = None
-    delta: DeltaContent | None = None
-
-
-class CompletionResponse(BaseModel):
-    # request id
-    id: str | None = None
-    # object type
-    object: ObjectType | None = None
-    # created timestamp
-    created: int | None = None
-    # model name
-    model: str | None = None
-    # choices list
-    choices: List[CompletionChoicesData] | None = None
-    # prompt list
-    prompt: List[PromptPart] | None = None
-    # token usage data
-    usage: UsageData | None = None
-
-
-class CompletionChunk(BaseModel):
-    # request id
-    id: str | None = None
-    # object type
-    object: ObjectType | None = None
-    # created timestamp
-    created: int | None = None
-    # model name
-    model: str | None = None
-    # choices list
-    choices: List[CompletionChoicesChunk] | None = None
-    # token usage data
-    usage: UsageData | None = None

together/types/embeddings.py
DELETED
@@ -1,35 +0,0 @@
-from __future__ import annotations
-
-from typing import List, Literal
-
-from together.types.abstract import BaseModel
-from together.types.common import (
-    ObjectType,
-)
-
-
-class EmbeddingRequest(BaseModel):
-    # input or list of inputs
-    input: str | List[str]
-    # model to query
-    model: str
-
-
-class EmbeddingChoicesData(BaseModel):
-    # response index
-    index: int
-    # object type
-    object: ObjectType
-    # embedding response
-    embedding: List[float] | None = None
-
-
-class EmbeddingResponse(BaseModel):
-    # job id
-    id: str | None = None
-    # query model
-    model: str | None = None
-    # object type
-    object: Literal["list"] | None = None
-    # list of embedding choices
-    data: List[EmbeddingChoicesData] | None = None
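
Finally, a small sketch of parsing an embeddings payload with the deleted response models above; the id, model name, and vector values are invented:

```python
# Illustrative only: validating a payload with the removed EmbeddingResponse model (1.5.17).
from together.types.embeddings import EmbeddingResponse

payload = {
    "id": "emb-123",
    "model": "placeholder-embedding-model",
    "object": "list",
    "data": [
        {"index": 0, "object": "embedding", "embedding": [0.12, -0.03, 0.88]},
    ],
}
response = EmbeddingResponse(**payload)
assert response.data is not None
print(response.data[0].embedding)  # [0.12, -0.03, 0.88]
```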