together 1.5.35-py3-none-any.whl → 2.0.0a6-py3-none-any.whl
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- together/__init__.py +101 -114
- together/_base_client.py +1995 -0
- together/_client.py +1033 -0
- together/_compat.py +219 -0
- together/_constants.py +14 -0
- together/_exceptions.py +108 -0
- together/_files.py +123 -0
- together/_models.py +857 -0
- together/_qs.py +150 -0
- together/_resource.py +43 -0
- together/_response.py +830 -0
- together/_streaming.py +370 -0
- together/_types.py +260 -0
- together/_utils/__init__.py +64 -0
- together/_utils/_compat.py +45 -0
- together/_utils/_datetime_parse.py +136 -0
- together/_utils/_logs.py +25 -0
- together/_utils/_proxy.py +65 -0
- together/_utils/_reflection.py +42 -0
- together/_utils/_resources_proxy.py +24 -0
- together/_utils/_streams.py +12 -0
- together/_utils/_sync.py +58 -0
- together/_utils/_transform.py +457 -0
- together/_utils/_typing.py +156 -0
- together/_utils/_utils.py +421 -0
- together/_version.py +4 -0
- together/lib/.keep +4 -0
- together/lib/__init__.py +23 -0
- together/{cli → lib/cli}/api/endpoints.py +66 -84
- together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
- together/{cli → lib/cli}/api/files.py +20 -17
- together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
- together/{cli → lib/cli}/api/models.py +34 -27
- together/lib/cli/api/utils.py +50 -0
- together/{cli → lib/cli}/cli.py +16 -26
- together/{constants.py → lib/constants.py} +11 -24
- together/lib/resources/__init__.py +11 -0
- together/lib/resources/files.py +999 -0
- together/lib/resources/fine_tuning.py +280 -0
- together/lib/resources/models.py +35 -0
- together/lib/types/__init__.py +13 -0
- together/lib/types/error.py +9 -0
- together/lib/types/fine_tuning.py +397 -0
- together/{utils → lib/utils}/__init__.py +6 -14
- together/{utils → lib/utils}/_log.py +11 -16
- together/{utils → lib/utils}/files.py +90 -288
- together/lib/utils/serializer.py +10 -0
- together/{utils → lib/utils}/tools.py +19 -55
- together/resources/__init__.py +225 -39
- together/resources/audio/__init__.py +72 -48
- together/resources/audio/audio.py +198 -0
- together/resources/audio/speech.py +574 -128
- together/resources/audio/transcriptions.py +247 -261
- together/resources/audio/translations.py +221 -241
- together/resources/audio/voices.py +111 -41
- together/resources/batches.py +417 -0
- together/resources/chat/__init__.py +30 -21
- together/resources/chat/chat.py +102 -0
- together/resources/chat/completions.py +1063 -263
- together/resources/code_interpreter/__init__.py +33 -0
- together/resources/code_interpreter/code_interpreter.py +258 -0
- together/resources/code_interpreter/sessions.py +135 -0
- together/resources/completions.py +884 -225
- together/resources/embeddings.py +172 -68
- together/resources/endpoints.py +589 -490
- together/resources/evals.py +452 -0
- together/resources/files.py +397 -129
- together/resources/fine_tuning.py +1033 -0
- together/resources/hardware.py +181 -0
- together/resources/images.py +258 -104
- together/resources/jobs.py +214 -0
- together/resources/models.py +223 -193
- together/resources/rerank.py +190 -92
- together/resources/videos.py +286 -214
- together/types/__init__.py +66 -167
- together/types/audio/__init__.py +10 -0
- together/types/audio/speech_create_params.py +75 -0
- together/types/audio/transcription_create_params.py +54 -0
- together/types/audio/transcription_create_response.py +111 -0
- together/types/audio/translation_create_params.py +40 -0
- together/types/audio/translation_create_response.py +70 -0
- together/types/audio/voice_list_response.py +23 -0
- together/types/audio_speech_stream_chunk.py +16 -0
- together/types/autoscaling.py +13 -0
- together/types/autoscaling_param.py +15 -0
- together/types/batch_create_params.py +24 -0
- together/types/batch_create_response.py +14 -0
- together/types/batch_job.py +45 -0
- together/types/batch_list_response.py +10 -0
- together/types/chat/__init__.py +18 -0
- together/types/chat/chat_completion.py +60 -0
- together/types/chat/chat_completion_chunk.py +61 -0
- together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
- together/types/chat/chat_completion_structured_message_text_param.py +13 -0
- together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
- together/types/chat/chat_completion_usage.py +13 -0
- together/types/chat/chat_completion_warning.py +9 -0
- together/types/chat/completion_create_params.py +329 -0
- together/types/code_interpreter/__init__.py +5 -0
- together/types/code_interpreter/session_list_response.py +31 -0
- together/types/code_interpreter_execute_params.py +45 -0
- together/types/completion.py +42 -0
- together/types/completion_chunk.py +66 -0
- together/types/completion_create_params.py +138 -0
- together/types/dedicated_endpoint.py +44 -0
- together/types/embedding.py +24 -0
- together/types/embedding_create_params.py +31 -0
- together/types/endpoint_create_params.py +43 -0
- together/types/endpoint_list_avzones_response.py +11 -0
- together/types/endpoint_list_params.py +18 -0
- together/types/endpoint_list_response.py +41 -0
- together/types/endpoint_update_params.py +27 -0
- together/types/eval_create_params.py +263 -0
- together/types/eval_create_response.py +16 -0
- together/types/eval_list_params.py +21 -0
- together/types/eval_list_response.py +10 -0
- together/types/eval_status_response.py +100 -0
- together/types/evaluation_job.py +139 -0
- together/types/execute_response.py +108 -0
- together/types/file_delete_response.py +13 -0
- together/types/file_list.py +12 -0
- together/types/file_purpose.py +9 -0
- together/types/file_response.py +31 -0
- together/types/file_type.py +7 -0
- together/types/fine_tuning_cancel_response.py +194 -0
- together/types/fine_tuning_content_params.py +24 -0
- together/types/fine_tuning_delete_params.py +11 -0
- together/types/fine_tuning_delete_response.py +12 -0
- together/types/fine_tuning_list_checkpoints_response.py +21 -0
- together/types/fine_tuning_list_events_response.py +12 -0
- together/types/fine_tuning_list_response.py +199 -0
- together/types/finetune_event.py +41 -0
- together/types/finetune_event_type.py +33 -0
- together/types/finetune_response.py +177 -0
- together/types/hardware_list_params.py +16 -0
- together/types/hardware_list_response.py +58 -0
- together/types/image_data_b64.py +15 -0
- together/types/image_data_url.py +15 -0
- together/types/image_file.py +23 -0
- together/types/image_generate_params.py +85 -0
- together/types/job_list_response.py +47 -0
- together/types/job_retrieve_response.py +43 -0
- together/types/log_probs.py +18 -0
- together/types/model_list_response.py +10 -0
- together/types/model_object.py +42 -0
- together/types/model_upload_params.py +36 -0
- together/types/model_upload_response.py +23 -0
- together/types/rerank_create_params.py +36 -0
- together/types/rerank_create_response.py +36 -0
- together/types/tool_choice.py +23 -0
- together/types/tool_choice_param.py +23 -0
- together/types/tools_param.py +23 -0
- together/types/training_method_dpo.py +22 -0
- together/types/training_method_sft.py +18 -0
- together/types/video_create_params.py +86 -0
- together/types/video_create_response.py +10 -0
- together/types/video_job.py +57 -0
- together-2.0.0a6.dist-info/METADATA +729 -0
- together-2.0.0a6.dist-info/RECORD +165 -0
- {together-1.5.35.dist-info → together-2.0.0a6.dist-info}/WHEEL +1 -1
- together-2.0.0a6.dist-info/entry_points.txt +2 -0
- {together-1.5.35.dist-info → together-2.0.0a6.dist-info}/licenses/LICENSE +1 -1
- together/abstract/api_requestor.py +0 -770
- together/cli/api/chat.py +0 -298
- together/cli/api/completions.py +0 -119
- together/cli/api/images.py +0 -93
- together/cli/api/utils.py +0 -139
- together/client.py +0 -186
- together/error.py +0 -194
- together/filemanager.py +0 -635
- together/legacy/__init__.py +0 -0
- together/legacy/base.py +0 -27
- together/legacy/complete.py +0 -93
- together/legacy/embeddings.py +0 -27
- together/legacy/files.py +0 -146
- together/legacy/finetune.py +0 -177
- together/legacy/images.py +0 -27
- together/legacy/models.py +0 -44
- together/resources/batch.py +0 -165
- together/resources/code_interpreter.py +0 -82
- together/resources/evaluation.py +0 -808
- together/resources/finetune.py +0 -1388
- together/together_response.py +0 -50
- together/types/abstract.py +0 -26
- together/types/audio_speech.py +0 -311
- together/types/batch.py +0 -54
- together/types/chat_completions.py +0 -210
- together/types/code_interpreter.py +0 -57
- together/types/common.py +0 -67
- together/types/completions.py +0 -107
- together/types/embeddings.py +0 -35
- together/types/endpoints.py +0 -123
- together/types/error.py +0 -16
- together/types/evaluation.py +0 -93
- together/types/files.py +0 -93
- together/types/finetune.py +0 -465
- together/types/images.py +0 -42
- together/types/models.py +0 -96
- together/types/rerank.py +0 -43
- together/types/videos.py +0 -69
- together/utils/api_helpers.py +0 -124
- together/version.py +0 -6
- together-1.5.35.dist-info/METADATA +0 -583
- together-1.5.35.dist-info/RECORD +0 -77
- together-1.5.35.dist-info/entry_points.txt +0 -3
- /together/{abstract → lib/cli}/__init__.py +0 -0
- /together/{cli → lib/cli/api}/__init__.py +0 -0
- /together/{cli/api/__init__.py → py.typed} +0 -0
together/types/finetune.py
DELETED
@@ -1,465 +0,0 @@
-from __future__ import annotations
-
-from enum import Enum
-from typing import Any, List, Literal
-
-from pydantic import Field, StrictBool, field_validator
-
-from together.types.abstract import BaseModel
-from together.types.common import ObjectType
-
-
-class FinetuneJobStatus(str, Enum):
-    """
-    Possible fine-tune job status
-    """
-
-    STATUS_PENDING = "pending"
-    STATUS_QUEUED = "queued"
-    STATUS_RUNNING = "running"
-    STATUS_COMPRESSING = "compressing"
-    STATUS_UPLOADING = "uploading"
-    STATUS_CANCEL_REQUESTED = "cancel_requested"
-    STATUS_CANCELLED = "cancelled"
-    STATUS_ERROR = "error"
-    STATUS_USER_ERROR = "user_error"
-    STATUS_COMPLETED = "completed"
-
-
-COMPLETED_STATUSES = [
-    FinetuneJobStatus.STATUS_ERROR,
-    FinetuneJobStatus.STATUS_USER_ERROR,
-    FinetuneJobStatus.STATUS_COMPLETED,
-    FinetuneJobStatus.STATUS_CANCELLED,
-]
-
-
-class FinetuneEventLevels(str, Enum):
-    """
-    Fine-tune job event status levels
-    """
-
-    NULL = ""
-    INFO = "Info"
-    WARNING = "Warning"
-    ERROR = "Error"
-    LEGACY_INFO = "info"
-    LEGACY_IWARNING = "warning"
-    LEGACY_IERROR = "error"
-
-
-class FinetuneEventType(str, Enum):
-    """
-    Fine-tune job event types
-    """
-
-    JOB_PENDING = "JOB_PENDING"
-    JOB_START = "JOB_START"
-    JOB_STOPPED = "JOB_STOPPED"
-    MODEL_DOWNLOADING = "MODEL_DOWNLOADING"
-    MODEL_DOWNLOAD_COMPLETE = "MODEL_DOWNLOAD_COMPLETE"
-    TRAINING_DATA_DOWNLOADING = "TRAINING_DATA_DOWNLOADING"
-    TRAINING_DATA_DOWNLOAD_COMPLETE = "TRAINING_DATA_DOWNLOAD_COMPLETE"
-    VALIDATION_DATA_DOWNLOADING = "VALIDATION_DATA_DOWNLOADING"
-    VALIDATION_DATA_DOWNLOAD_COMPLETE = "VALIDATION_DATA_DOWNLOAD_COMPLETE"
-    WANDB_INIT = "WANDB_INIT"
-    TRAINING_START = "TRAINING_START"
-    CHECKPOINT_SAVE = "CHECKPOINT_SAVE"
-    BILLING_LIMIT = "BILLING_LIMIT"
-    EPOCH_COMPLETE = "EPOCH_COMPLETE"
-    EVAL_COMPLETE = "EVAL_COMPLETE"
-    TRAINING_COMPLETE = "TRAINING_COMPLETE"
-    MODEL_COMPRESSING = "COMPRESSING_MODEL"
-    MODEL_COMPRESSION_COMPLETE = "MODEL_COMPRESSION_COMPLETE"
-    MODEL_UPLOADING = "MODEL_UPLOADING"
-    MODEL_UPLOAD_COMPLETE = "MODEL_UPLOAD_COMPLETE"
-    MODEL_UPLOADING_TO_HF = "MODEL_UPLOADING_TO_HF"
-    MODEL_UPLOAD_TO_HF_COMPLETE = "MODEL_UPLOAD_TO_HF_COMPLETE"
-    JOB_COMPLETE = "JOB_COMPLETE"
-    JOB_ERROR = "JOB_ERROR"
-    JOB_USER_ERROR = "JOB_USER_ERROR"
-    CANCEL_REQUESTED = "CANCEL_REQUESTED"
-    JOB_RESTARTED = "JOB_RESTARTED"
-    REFUND = "REFUND"
-    WARNING = "WARNING"
-
-
-class DownloadCheckpointType(Enum):
-    DEFAULT = "default"
-    MERGED = "merged"
-    ADAPTER = "adapter"
-
-
-class FinetuneEvent(BaseModel):
-    """
-    Fine-tune event type
-    """
-
-    # object type
-    object: Literal[ObjectType.FinetuneEvent]
-    # created at datetime stamp
-    created_at: str | None = None
-    # event log level
-    level: FinetuneEventLevels | None = None
-    # event message string
-    message: str | None = None
-    # event type
-    type: FinetuneEventType | None = None
-    # optional: model parameter count
-    param_count: int | None = None
-    # optional: dataset token count
-    token_count: int | None = None
-    # optional: weights & biases url
-    wandb_url: str | None = None
-    # event hash
-    hash: str | None = None
-
-
-class TrainingType(BaseModel):
-    """
-    Abstract training type
-    """
-
-    type: str
-
-
-class FullTrainingType(TrainingType):
-    """
-    Training type for full fine-tuning
-    """
-
-    type: str = "Full"
-
-
-class LoRATrainingType(TrainingType):
-    """
-    Training type for LoRA adapters training
-    """
-
-    lora_r: int
-    lora_alpha: int
-    lora_dropout: float = 0.0
-    lora_trainable_modules: str = "all-linear"
-    type: str = "Lora"
-
-
-class TrainingMethod(BaseModel):
-    """
-    Training method type
-    """
-
-    method: str
-
-
-class TrainingMethodSFT(TrainingMethod):
-    """
-    Training method type for SFT training
-    """
-
-    method: Literal["sft"] = "sft"
-    train_on_inputs: StrictBool | Literal["auto"] = "auto"
-
-
-class TrainingMethodDPO(TrainingMethod):
-    """
-    Training method type for DPO training
-    """
-
-    method: Literal["dpo"] = "dpo"
-    dpo_beta: float | None = None
-    dpo_normalize_logratios_by_length: bool = False
-    dpo_reference_free: bool = False
-    rpo_alpha: float | None = None
-    simpo_gamma: float | None = None
-
-
-class FinetuneMultimodalParams(BaseModel):
-    """
-    Multimodal parameters
-    """
-
-    train_vision: bool = False
-
-
-class FinetuneProgress(BaseModel):
-    """
-    Fine-tune job progress
-    """
-
-    estimate_available: bool = False
-    seconds_remaining: float = 0
-
-
-class FinetuneRequest(BaseModel):
-    """
-    Fine-tune request type
-    """
-
-    # training file ID
-    training_file: str
-    # validation file id
-    validation_file: str | None = None
-    # base model string
-    model: str | None = None
-    # number of epochs to train for
-    n_epochs: int
-    # training learning rate
-    learning_rate: float
-    # learning rate scheduler type and args
-    lr_scheduler: LinearLRScheduler | CosineLRScheduler | None = None
-    # learning rate warmup ratio
-    warmup_ratio: float
-    # max gradient norm
-    max_grad_norm: float
-    # weight decay
-    weight_decay: float
-    # number of checkpoints to save
-    n_checkpoints: int | None = None
-    # number of evaluation loops to run
-    n_evals: int | None = None
-    # training batch size
-    batch_size: int | Literal["max"] | None = None
-    # up to 40 character suffix for output model name
-    suffix: str | None = None
-    # weights & biases api key
-    wandb_key: str | None = None
-    # weights & biases base url
-    wandb_base_url: str | None = None
-    # wandb project name
-    wandb_project_name: str | None = None
-    # wandb run name
-    wandb_name: str | None = None
-    # training type
-    training_type: FullTrainingType | LoRATrainingType | None = None
-    # training method
-    training_method: TrainingMethodSFT | TrainingMethodDPO = Field(
-        default_factory=TrainingMethodSFT
-    )
-    # from step
-    from_checkpoint: str | None = None
-    # multimodal parameters
-    multimodal_params: FinetuneMultimodalParams | None = None
-    # hf related fields
-    hf_api_token: str | None = None
-    hf_output_repo_name: str | None = None
-
-
-class FinetuneResponse(BaseModel):
-    """
-    Fine-tune API response type
-    """
-
-    # job ID
-    id: str | None = None
-    # training file id
-    training_file: str | None = None
-    # validation file id
-    validation_file: str | None = None
-    # base model name
-    model: str | None = None
-    # output model name
-    output_name: str | None = Field(None, alias="model_output_name")
-    # adapter output name
-    adapter_output_name: str | None = None
-    # number of epochs
-    n_epochs: int | None = None
-    # number of checkpoints to save
-    n_checkpoints: int | None = None
-    # number of evaluation loops
-    n_evals: int | None = None
-    # training batch size
-    batch_size: int | None = None
-    # training learning rate
-    learning_rate: float | None = None
-    # learning rate scheduler type and args
-    lr_scheduler: LinearLRScheduler | CosineLRScheduler | EmptyLRScheduler | None = None
-    # learning rate warmup ratio
-    warmup_ratio: float | None = None
-    # max gradient norm
-    max_grad_norm: float | None = None
-    # weight decay
-    weight_decay: float | None = None
-    # number of steps between evals
-    eval_steps: int | None = None
-    # training type
-    training_type: TrainingType | None = None
-    # created/updated datetime stamps
-    created_at: str | None = None
-    updated_at: str | None = None
-    started_at: str | None = None
-    # job status
-    status: FinetuneJobStatus | None = None
-    # job id
-    job_id: str | None = None
-    # list of fine-tune events
-    events: List[FinetuneEvent] | None = None
-    # dataset token count
-    token_count: int | None = None
-    # model parameter count
-    param_count: int | None = None
-    # fine-tune job price
-    total_price: int | None = None
-    # total number of training steps
-    total_steps: int | None = None
-    # number of steps completed (incrementing counter)
-    steps_completed: int | None = None
-    # number of epochs completed (incrementing counter)
-    epochs_completed: int | None = None
-    # number of evaluation loops completed (incrementing counter)
-    evals_completed: int | None = None
-    # place in job queue (decrementing counter)
-    queue_depth: int | None = None
-    # weights & biases base url
-    wandb_base_url: str | None = None
-    # wandb project name
-    wandb_project_name: str | None = None
-    # wandb run name
-    wandb_name: str | None = None
-    # weights & biases job url
-    wandb_url: str | None = None
-    # training file metadata
-    training_file_num_lines: int | None = Field(None, alias="TrainingFileNumLines")
-    training_file_size: int | None = Field(None, alias="TrainingFileSize")
-    train_on_inputs: StrictBool | Literal["auto"] | None = "auto"
-    from_checkpoint: str | None = None
-    # multimodal parameters
-    multimodal_params: FinetuneMultimodalParams | None = None
-
-    progress: FinetuneProgress | None = None
-
-    @field_validator("training_type")
-    @classmethod
-    def validate_training_type(cls, v: TrainingType) -> TrainingType:
-        if v.type == "Full" or v.type == "":
-            return FullTrainingType(**v.model_dump())
-        elif v.type == "Lora":
-            return LoRATrainingType(**v.model_dump())
-        else:
-            raise ValueError("Unknown training type")
-
-
-class FinetunePriceEstimationRequest(BaseModel):
-    """
-    Fine-tune price estimation request type
-    """
-
-    training_file: str
-    validation_file: str | None = None
-    model: str
-    n_epochs: int
-    n_evals: int
-    training_type: LoRATrainingType | FullTrainingType
-    training_method: TrainingMethodSFT | TrainingMethodDPO
-
-
-class FinetunePriceEstimationResponse(BaseModel):
-    """
-    Fine-tune price estimation response type
-    """
-
-    estimated_total_price: float
-    user_limit: float
-    estimated_train_token_count: int
-    estimated_eval_token_count: int
-    allowed_to_proceed: bool
-
-
-class FinetuneList(BaseModel):
-    # object type
-    object: Literal["list"] | None = None
-    # list of fine-tune job objects
-    data: List[FinetuneResponse] | None = None
-
-
-class FinetuneListEvents(BaseModel):
-    # object type
-    object: Literal["list"] | None = None
-    # list of fine-tune events
-    data: List[FinetuneEvent] | None = None
-
-
-class FinetuneDeleteResponse(BaseModel):
-    # delete message
-    message: str
-
-
-class FinetuneDownloadResult(BaseModel):
-    # object type
-    object: Literal["local"] | None = None
-    # fine-tune job id
-    id: str | None = None
-    # checkpoint step number
-    checkpoint_step: int | None = None
-    # local path filename
-    filename: str | None = None
-    # size in bytes
-    size: int | None = None
-
-
-class FinetuneFullTrainingLimits(BaseModel):
-    max_batch_size: int
-    max_batch_size_dpo: int = -1
-    min_batch_size: int
-
-    def __init__(self, **data: Any) -> None:
-        super().__init__(**data)
-        if self.max_batch_size_dpo == -1:
-            half_max = self.max_batch_size // 2
-            rounded_half_max = (half_max // 8) * 8
-            self.max_batch_size_dpo = max(self.min_batch_size, rounded_half_max)
-
-
-class FinetuneLoraTrainingLimits(FinetuneFullTrainingLimits):
-    max_rank: int
-    target_modules: List[str]
-
-
-class FinetuneTrainingLimits(BaseModel):
-    max_num_epochs: int
-    max_learning_rate: float
-    min_learning_rate: float
-    full_training: FinetuneFullTrainingLimits | None = None
-    lora_training: FinetuneLoraTrainingLimits | None = None
-    supports_vision: bool = False
-
-
-class LinearLRSchedulerArgs(BaseModel):
-    min_lr_ratio: float | None = 0.0
-
-
-class CosineLRSchedulerArgs(BaseModel):
-    min_lr_ratio: float | None = 0.0
-    num_cycles: float | None = 0.5
-
-
-class FinetuneLRScheduler(BaseModel):
-    lr_scheduler_type: str
-
-
-class LinearLRScheduler(FinetuneLRScheduler):
-    lr_scheduler_type: Literal["linear"] = "linear"
-    lr_scheduler_args: LinearLRSchedulerArgs | None = None
-
-
-class CosineLRScheduler(FinetuneLRScheduler):
-    lr_scheduler_type: Literal["cosine"] = "cosine"
-    lr_scheduler_args: CosineLRSchedulerArgs | None = None
-
-
-# placeholder for old fine-tuning jobs with no lr_scheduler_type specified
-class EmptyLRScheduler(FinetuneLRScheduler):
-    lr_scheduler_type: Literal[""]
-    lr_scheduler_args: None = None
-
-
-class FinetuneCheckpoint(BaseModel):
-    """
-    Fine-tuning checkpoint information
-    """
-
-    # checkpoint type (e.g. "Intermediate", "Final", "Final Merged", "Final Adapter")
-    type: str
-    # timestamp when the checkpoint was created
-    timestamp: str
-    # checkpoint name/identifier
-    name: str
together/types/images.py
DELETED
@@ -1,42 +0,0 @@
-from __future__ import annotations
-
-from typing import List, Literal
-
-from together.types.abstract import BaseModel
-
-
-class ImageRequest(BaseModel):
-    # input or list of inputs
-    prompt: str
-    # model to query
-    model: str
-    # seed
-    seed: int | None = None
-    # number of results to return
-    n: int | None = 1
-    # pixel height
-    height: int | None = 1024
-    # pixel width
-    width: int | None = 1024
-    # negative prompt
-    negative_prompt: str | None = None
-
-
-class ImageChoicesData(BaseModel):
-    # response index
-    index: int
-    # base64 image response
-    b64_json: str | None = None
-    # URL hosting image
-    url: str | None = None
-
-
-class ImageResponse(BaseModel):
-    # job id
-    id: str | None = None
-    # query model
-    model: str | None = None
-    # object type
-    object: Literal["list"] | None = None
-    # list of embedding choices
-    data: List[ImageChoicesData] | None = None
together/types/models.py
DELETED
@@ -1,96 +0,0 @@
-from __future__ import annotations
-
-from enum import Enum
-from typing import Any, Dict, Literal, Optional
-
-from together.types.abstract import BaseModel
-from together.types.common import ObjectType
-
-
-class ModelType(str, Enum):
-    CHAT = "chat"
-    LANGUAGE = "language"
-    CODE = "code"
-    IMAGE = "image"
-    EMBEDDING = "embedding"
-    MODERATION = "moderation"
-    RERANK = "rerank"
-    AUDIO = "audio"
-    TRANSCRIBE = "transcribe"
-    VIDEO = "video"
-
-
-class PricingObject(BaseModel):
-    input: float | None = None
-    output: float | None = None
-    hourly: float | None = None
-    base: float | None = None
-    finetune: float | None = None
-
-
-class ModelObject(BaseModel):
-    # model id
-    id: str
-    # object type
-    object: Literal[ObjectType.Model]
-    created: int | None = None
-    # model type
-    type: ModelType | None = None
-    # pretty name
-    display_name: str | None = None
-    # model creator organization
-    organization: str | None = None
-    # link to model resource
-    link: str | None = None
-    license: str | None = None
-    context_length: int | None = None
-    pricing: PricingObject
-
-
-class ModelUploadRequest(BaseModel):
-    model_name: str
-    model_source: str
-    model_type: Literal["model", "adapter"] = "model"
-    hf_token: Optional[str] = None
-    description: Optional[str] = None
-    base_model: Optional[str] = None
-    lora_model: Optional[str] = None
-
-
-class ModelUploadResponse(BaseModel):
-    job_id: Optional[str] = None
-    model_name: Optional[str] = None
-    model_id: Optional[str] = None
-    model_source: Optional[str] = None
-    message: str
-
-    @classmethod
-    def from_api_response(cls, response_data: Dict[str, Any]) -> "ModelUploadResponse":
-        """Create ModelUploadResponse from API response, handling both flat and nested structures"""
-        # Start with the base response
-        result: Dict[str, Any] = {"message": response_data.get("message", "")}
-
-        # Check if we have nested data
-        if "data" in response_data and response_data["data"] is not None:
-            # Use nested data values
-            nested_data = response_data["data"]
-            result.update(
-                {
-                    "job_id": nested_data.get("job_id"),
-                    "model_name": nested_data.get("model_name"),
-                    "model_id": nested_data.get("model_id"),
-                    "model_source": nested_data.get("model_source"),
-                }
-            )
-        else:
-            # Use top-level values
-            result.update(
-                {
-                    "job_id": response_data.get("job_id"),
-                    "model_name": response_data.get("model_name"),
-                    "model_id": response_data.get("model_id"),
-                    "model_source": response_data.get("model_source"),
-                }
-            )
-
-        return cls(**result)
together/types/rerank.py
DELETED
@@ -1,43 +0,0 @@
-from __future__ import annotations
-
-from typing import List, Literal, Dict, Any
-
-from together.types.abstract import BaseModel
-from together.types.common import UsageData
-
-
-class RerankRequest(BaseModel):
-    # model to query
-    model: str
-    # input or list of inputs
-    query: str
-    # list of documents
-    documents: List[str] | List[Dict[str, Any]]
-    # return top_n results
-    top_n: int | None = None
-    # boolean to return documents
-    return_documents: bool = False
-    # field selector for documents
-    rank_fields: List[str] | None = None
-
-
-class RerankChoicesData(BaseModel):
-    # response index
-    index: int
-    # object type
-    relevance_score: float
-    # rerank response
-    document: Dict[str, Any] | None = None
-
-
-class RerankResponse(BaseModel):
-    # job id
-    id: str | None = None
-    # object type
-    object: Literal["rerank"] | None = None
-    # query model
-    model: str | None = None
-    # list of reranked results
-    results: List[RerankChoicesData] | None = None
-    # usage stats
-    usage: UsageData | None = None
together/types/videos.py
DELETED
@@ -1,69 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, List, Literal, Optional
-
-from together.types.abstract import BaseModel
-
-
-class CreateVideoBody(BaseModel):
-    """Request model for video creation"""
-
-    # Required parameters
-    model: str
-
-    # Optional dimension parameters
-    prompt: str | None = None
-    height: int | None = None
-    width: int | None = None
-
-    # Optional generation parameters
-    seconds: str | None = None  # Min 1 max 10
-    fps: int | None = None  # Frames per second, min 15 max 60, default 24
-    steps: int | None = None  # Denoising steps, min 10 max 50, default 20
-    seed: int | None = None
-    guidance_scale: float | None = None  # Default 8, recommended 6.0-10.0
-    output_format: Literal["MP4", "WEBM"] | None = (
-        None  # "MP4" or "WEBM", default "MP4"
-    )
-    output_quality: int | None = None  # Compression quality, default 20
-    negative_prompt: str | None = None
-
-    # Advanced parameters
-    frame_images: List[Dict[str, Any]] | None = None  # Array of keyframe images
-    reference_images: List[str] | None = None  # Array of reference images
-
-
-class VideoOutputs(BaseModel):
-    """Artifacts generated from video creation job"""
-
-    cost: float
-    video_url: str
-
-
-class Error(BaseModel):
-    """Error information about the video job"""
-
-    code: str | None = None
-    message: str
-
-
-class CreateVideoResponse(BaseModel):
-    """Response from video generation request"""
-
-    id: str
-
-
-class VideoJob(BaseModel):
-    """Structured information describing a generated video job."""
-
-    id: str
-    model: str
-    object: Literal["video"]
-    status: Literal["queued", "in_progress", "completed", "failed", "cancelled"]
-    seconds: str
-    size: str
-    created_at: int
-
-    error: Error | None = None
-    outputs: VideoOutputs | None = None
-    completed_at: int | None = None