together 1.5.35__py3-none-any.whl → 2.0.0a6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (208)
  1. together/__init__.py +101 -114
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +66 -84
  30. together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
  33. together/{cli → lib/cli}/api/models.py +34 -27
  34. together/lib/cli/api/utils.py +50 -0
  35. together/{cli → lib/cli}/cli.py +16 -26
  36. together/{constants.py → lib/constants.py} +11 -24
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +397 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/{utils → lib/utils}/files.py +90 -288
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -39
  50. together/resources/audio/__init__.py +72 -48
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -128
  53. together/resources/audio/transcriptions.py +247 -261
  54. together/resources/audio/translations.py +221 -241
  55. together/resources/audio/voices.py +111 -41
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +589 -490
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +397 -129
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +258 -104
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +223 -193
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +286 -214
  75. together/types/__init__.py +66 -167
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_create_response.py +10 -0
  157. together/types/video_job.py +57 -0
  158. together-2.0.0a6.dist-info/METADATA +729 -0
  159. together-2.0.0a6.dist-info/RECORD +165 -0
  160. {together-1.5.35.dist-info → together-2.0.0a6.dist-info}/WHEEL +1 -1
  161. together-2.0.0a6.dist-info/entry_points.txt +2 -0
  162. {together-1.5.35.dist-info → together-2.0.0a6.dist-info}/licenses/LICENSE +1 -1
  163. together/abstract/api_requestor.py +0 -770
  164. together/cli/api/chat.py +0 -298
  165. together/cli/api/completions.py +0 -119
  166. together/cli/api/images.py +0 -93
  167. together/cli/api/utils.py +0 -139
  168. together/client.py +0 -186
  169. together/error.py +0 -194
  170. together/filemanager.py +0 -635
  171. together/legacy/__init__.py +0 -0
  172. together/legacy/base.py +0 -27
  173. together/legacy/complete.py +0 -93
  174. together/legacy/embeddings.py +0 -27
  175. together/legacy/files.py +0 -146
  176. together/legacy/finetune.py +0 -177
  177. together/legacy/images.py +0 -27
  178. together/legacy/models.py +0 -44
  179. together/resources/batch.py +0 -165
  180. together/resources/code_interpreter.py +0 -82
  181. together/resources/evaluation.py +0 -808
  182. together/resources/finetune.py +0 -1388
  183. together/together_response.py +0 -50
  184. together/types/abstract.py +0 -26
  185. together/types/audio_speech.py +0 -311
  186. together/types/batch.py +0 -54
  187. together/types/chat_completions.py +0 -210
  188. together/types/code_interpreter.py +0 -57
  189. together/types/common.py +0 -67
  190. together/types/completions.py +0 -107
  191. together/types/embeddings.py +0 -35
  192. together/types/endpoints.py +0 -123
  193. together/types/error.py +0 -16
  194. together/types/evaluation.py +0 -93
  195. together/types/files.py +0 -93
  196. together/types/finetune.py +0 -465
  197. together/types/images.py +0 -42
  198. together/types/models.py +0 -96
  199. together/types/rerank.py +0 -43
  200. together/types/videos.py +0 -69
  201. together/utils/api_helpers.py +0 -124
  202. together/version.py +0 -6
  203. together-1.5.35.dist-info/METADATA +0 -583
  204. together-1.5.35.dist-info/RECORD +0 -77
  205. together-1.5.35.dist-info/entry_points.txt +0 -3
  206. /together/{abstract → lib/cli}/__init__.py +0 -0
  207. /together/{cli → lib/cli/api}/__init__.py +0 -0
  208. /together/{cli/api/__init__.py → py.typed} +0 -0
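The largest single removal in this diff is the hand-written fine-tuning type module, together/types/finetune.py (465 lines), whose deletion is shown first below. For reference, the following sketch shows how those 1.5.35-era models composed; it uses only classes and fields that appear in the deletion, assumes together==1.5.35 is installed, and uses placeholder file and model names. None of these imports exist in 2.0.0a6.

    # Sketch: building a fine-tuning request with the 1.5.35 pydantic models
    # removed below (placeholder IDs/model names; nothing is sent to the API).
    from together.types.finetune import (
        CosineLRScheduler,
        CosineLRSchedulerArgs,
        FinetuneRequest,
        LoRATrainingType,
        TrainingMethodDPO,
    )

    request = FinetuneRequest(
        training_file="file-train",  # placeholder file ID
        model="example-org/example-base-model",  # placeholder model name
        n_epochs=1,
        learning_rate=1e-5,
        warmup_ratio=0.0,
        max_grad_norm=1.0,
        weight_decay=0.0,
        batch_size="max",
        lr_scheduler=CosineLRScheduler(
            lr_scheduler_args=CosineLRSchedulerArgs(min_lr_ratio=0.0, num_cycles=0.5)
        ),
        training_type=LoRATrainingType(lora_r=8, lora_alpha=16),
        training_method=TrainingMethodDPO(dpo_beta=0.1),
    )
    print(request.model_dump(exclude_none=True))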
together/types/finetune.py DELETED
@@ -1,465 +0,0 @@
- from __future__ import annotations
-
- from enum import Enum
- from typing import Any, List, Literal
-
- from pydantic import Field, StrictBool, field_validator
-
- from together.types.abstract import BaseModel
- from together.types.common import ObjectType
-
-
- class FinetuneJobStatus(str, Enum):
-     """
-     Possible fine-tune job status
-     """
-
-     STATUS_PENDING = "pending"
-     STATUS_QUEUED = "queued"
-     STATUS_RUNNING = "running"
-     STATUS_COMPRESSING = "compressing"
-     STATUS_UPLOADING = "uploading"
-     STATUS_CANCEL_REQUESTED = "cancel_requested"
-     STATUS_CANCELLED = "cancelled"
-     STATUS_ERROR = "error"
-     STATUS_USER_ERROR = "user_error"
-     STATUS_COMPLETED = "completed"
-
-
- COMPLETED_STATUSES = [
-     FinetuneJobStatus.STATUS_ERROR,
-     FinetuneJobStatus.STATUS_USER_ERROR,
-     FinetuneJobStatus.STATUS_COMPLETED,
-     FinetuneJobStatus.STATUS_CANCELLED,
- ]
-
-
- class FinetuneEventLevels(str, Enum):
-     """
-     Fine-tune job event status levels
-     """
-
-     NULL = ""
-     INFO = "Info"
-     WARNING = "Warning"
-     ERROR = "Error"
-     LEGACY_INFO = "info"
-     LEGACY_IWARNING = "warning"
-     LEGACY_IERROR = "error"
-
-
- class FinetuneEventType(str, Enum):
-     """
-     Fine-tune job event types
-     """
-
-     JOB_PENDING = "JOB_PENDING"
-     JOB_START = "JOB_START"
-     JOB_STOPPED = "JOB_STOPPED"
-     MODEL_DOWNLOADING = "MODEL_DOWNLOADING"
-     MODEL_DOWNLOAD_COMPLETE = "MODEL_DOWNLOAD_COMPLETE"
-     TRAINING_DATA_DOWNLOADING = "TRAINING_DATA_DOWNLOADING"
-     TRAINING_DATA_DOWNLOAD_COMPLETE = "TRAINING_DATA_DOWNLOAD_COMPLETE"
-     VALIDATION_DATA_DOWNLOADING = "VALIDATION_DATA_DOWNLOADING"
-     VALIDATION_DATA_DOWNLOAD_COMPLETE = "VALIDATION_DATA_DOWNLOAD_COMPLETE"
-     WANDB_INIT = "WANDB_INIT"
-     TRAINING_START = "TRAINING_START"
-     CHECKPOINT_SAVE = "CHECKPOINT_SAVE"
-     BILLING_LIMIT = "BILLING_LIMIT"
-     EPOCH_COMPLETE = "EPOCH_COMPLETE"
-     EVAL_COMPLETE = "EVAL_COMPLETE"
-     TRAINING_COMPLETE = "TRAINING_COMPLETE"
-     MODEL_COMPRESSING = "COMPRESSING_MODEL"
-     MODEL_COMPRESSION_COMPLETE = "MODEL_COMPRESSION_COMPLETE"
-     MODEL_UPLOADING = "MODEL_UPLOADING"
-     MODEL_UPLOAD_COMPLETE = "MODEL_UPLOAD_COMPLETE"
-     MODEL_UPLOADING_TO_HF = "MODEL_UPLOADING_TO_HF"
-     MODEL_UPLOAD_TO_HF_COMPLETE = "MODEL_UPLOAD_TO_HF_COMPLETE"
-     JOB_COMPLETE = "JOB_COMPLETE"
-     JOB_ERROR = "JOB_ERROR"
-     JOB_USER_ERROR = "JOB_USER_ERROR"
-     CANCEL_REQUESTED = "CANCEL_REQUESTED"
-     JOB_RESTARTED = "JOB_RESTARTED"
-     REFUND = "REFUND"
-     WARNING = "WARNING"
-
-
- class DownloadCheckpointType(Enum):
-     DEFAULT = "default"
-     MERGED = "merged"
-     ADAPTER = "adapter"
-
-
- class FinetuneEvent(BaseModel):
-     """
-     Fine-tune event type
-     """
-
-     # object type
-     object: Literal[ObjectType.FinetuneEvent]
-     # created at datetime stamp
-     created_at: str | None = None
-     # event log level
-     level: FinetuneEventLevels | None = None
-     # event message string
-     message: str | None = None
-     # event type
-     type: FinetuneEventType | None = None
-     # optional: model parameter count
-     param_count: int | None = None
-     # optional: dataset token count
-     token_count: int | None = None
-     # optional: weights & biases url
-     wandb_url: str | None = None
-     # event hash
-     hash: str | None = None
-
-
- class TrainingType(BaseModel):
-     """
-     Abstract training type
-     """
-
-     type: str
-
-
- class FullTrainingType(TrainingType):
-     """
-     Training type for full fine-tuning
-     """
-
-     type: str = "Full"
-
-
- class LoRATrainingType(TrainingType):
-     """
-     Training type for LoRA adapters training
-     """
-
-     lora_r: int
-     lora_alpha: int
-     lora_dropout: float = 0.0
-     lora_trainable_modules: str = "all-linear"
-     type: str = "Lora"
-
-
- class TrainingMethod(BaseModel):
-     """
-     Training method type
-     """
-
-     method: str
-
-
- class TrainingMethodSFT(TrainingMethod):
-     """
-     Training method type for SFT training
-     """
-
-     method: Literal["sft"] = "sft"
-     train_on_inputs: StrictBool | Literal["auto"] = "auto"
-
-
- class TrainingMethodDPO(TrainingMethod):
-     """
-     Training method type for DPO training
-     """
-
-     method: Literal["dpo"] = "dpo"
-     dpo_beta: float | None = None
-     dpo_normalize_logratios_by_length: bool = False
-     dpo_reference_free: bool = False
-     rpo_alpha: float | None = None
-     simpo_gamma: float | None = None
-
-
- class FinetuneMultimodalParams(BaseModel):
-     """
-     Multimodal parameters
-     """
-
-     train_vision: bool = False
-
-
- class FinetuneProgress(BaseModel):
-     """
-     Fine-tune job progress
-     """
-
-     estimate_available: bool = False
-     seconds_remaining: float = 0
-
-
- class FinetuneRequest(BaseModel):
-     """
-     Fine-tune request type
-     """
-
-     # training file ID
-     training_file: str
-     # validation file id
-     validation_file: str | None = None
-     # base model string
-     model: str | None = None
-     # number of epochs to train for
-     n_epochs: int
-     # training learning rate
-     learning_rate: float
-     # learning rate scheduler type and args
-     lr_scheduler: LinearLRScheduler | CosineLRScheduler | None = None
-     # learning rate warmup ratio
-     warmup_ratio: float
-     # max gradient norm
-     max_grad_norm: float
-     # weight decay
-     weight_decay: float
-     # number of checkpoints to save
-     n_checkpoints: int | None = None
-     # number of evaluation loops to run
-     n_evals: int | None = None
-     # training batch size
-     batch_size: int | Literal["max"] | None = None
-     # up to 40 character suffix for output model name
-     suffix: str | None = None
-     # weights & biases api key
-     wandb_key: str | None = None
-     # weights & biases base url
-     wandb_base_url: str | None = None
-     # wandb project name
-     wandb_project_name: str | None = None
-     # wandb run name
-     wandb_name: str | None = None
-     # training type
-     training_type: FullTrainingType | LoRATrainingType | None = None
-     # training method
-     training_method: TrainingMethodSFT | TrainingMethodDPO = Field(
-         default_factory=TrainingMethodSFT
-     )
-     # from step
-     from_checkpoint: str | None = None
-     # multimodal parameters
-     multimodal_params: FinetuneMultimodalParams | None = None
-     # hf related fields
-     hf_api_token: str | None = None
-     hf_output_repo_name: str | None = None
-
-
- class FinetuneResponse(BaseModel):
-     """
-     Fine-tune API response type
-     """
-
-     # job ID
-     id: str | None = None
-     # training file id
-     training_file: str | None = None
-     # validation file id
-     validation_file: str | None = None
-     # base model name
-     model: str | None = None
-     # output model name
-     output_name: str | None = Field(None, alias="model_output_name")
-     # adapter output name
-     adapter_output_name: str | None = None
-     # number of epochs
-     n_epochs: int | None = None
-     # number of checkpoints to save
-     n_checkpoints: int | None = None
-     # number of evaluation loops
-     n_evals: int | None = None
-     # training batch size
-     batch_size: int | None = None
-     # training learning rate
-     learning_rate: float | None = None
-     # learning rate scheduler type and args
-     lr_scheduler: LinearLRScheduler | CosineLRScheduler | EmptyLRScheduler | None = None
-     # learning rate warmup ratio
-     warmup_ratio: float | None = None
-     # max gradient norm
-     max_grad_norm: float | None = None
-     # weight decay
-     weight_decay: float | None = None
-     # number of steps between evals
-     eval_steps: int | None = None
-     # training type
-     training_type: TrainingType | None = None
-     # created/updated datetime stamps
-     created_at: str | None = None
-     updated_at: str | None = None
-     started_at: str | None = None
-     # job status
-     status: FinetuneJobStatus | None = None
-     # job id
-     job_id: str | None = None
-     # list of fine-tune events
-     events: List[FinetuneEvent] | None = None
-     # dataset token count
-     token_count: int | None = None
-     # model parameter count
-     param_count: int | None = None
-     # fine-tune job price
-     total_price: int | None = None
-     # total number of training steps
-     total_steps: int | None = None
-     # number of steps completed (incrementing counter)
-     steps_completed: int | None = None
-     # number of epochs completed (incrementing counter)
-     epochs_completed: int | None = None
-     # number of evaluation loops completed (incrementing counter)
-     evals_completed: int | None = None
-     # place in job queue (decrementing counter)
-     queue_depth: int | None = None
-     # weights & biases base url
-     wandb_base_url: str | None = None
-     # wandb project name
-     wandb_project_name: str | None = None
-     # wandb run name
-     wandb_name: str | None = None
-     # weights & biases job url
-     wandb_url: str | None = None
-     # training file metadata
-     training_file_num_lines: int | None = Field(None, alias="TrainingFileNumLines")
-     training_file_size: int | None = Field(None, alias="TrainingFileSize")
-     train_on_inputs: StrictBool | Literal["auto"] | None = "auto"
-     from_checkpoint: str | None = None
-     # multimodal parameters
-     multimodal_params: FinetuneMultimodalParams | None = None
-
-     progress: FinetuneProgress | None = None
-
-     @field_validator("training_type")
-     @classmethod
-     def validate_training_type(cls, v: TrainingType) -> TrainingType:
-         if v.type == "Full" or v.type == "":
-             return FullTrainingType(**v.model_dump())
-         elif v.type == "Lora":
-             return LoRATrainingType(**v.model_dump())
-         else:
-             raise ValueError("Unknown training type")
-
-
- class FinetunePriceEstimationRequest(BaseModel):
-     """
-     Fine-tune price estimation request type
-     """
-
-     training_file: str
-     validation_file: str | None = None
-     model: str
-     n_epochs: int
-     n_evals: int
-     training_type: LoRATrainingType | FullTrainingType
-     training_method: TrainingMethodSFT | TrainingMethodDPO
-
-
- class FinetunePriceEstimationResponse(BaseModel):
-     """
-     Fine-tune price estimation response type
-     """
-
-     estimated_total_price: float
-     user_limit: float
-     estimated_train_token_count: int
-     estimated_eval_token_count: int
-     allowed_to_proceed: bool
-
-
- class FinetuneList(BaseModel):
-     # object type
-     object: Literal["list"] | None = None
-     # list of fine-tune job objects
-     data: List[FinetuneResponse] | None = None
-
-
- class FinetuneListEvents(BaseModel):
-     # object type
-     object: Literal["list"] | None = None
-     # list of fine-tune events
-     data: List[FinetuneEvent] | None = None
-
-
- class FinetuneDeleteResponse(BaseModel):
-     # delete message
-     message: str
-
-
- class FinetuneDownloadResult(BaseModel):
-     # object type
-     object: Literal["local"] | None = None
-     # fine-tune job id
-     id: str | None = None
-     # checkpoint step number
-     checkpoint_step: int | None = None
-     # local path filename
-     filename: str | None = None
-     # size in bytes
-     size: int | None = None
-
-
- class FinetuneFullTrainingLimits(BaseModel):
-     max_batch_size: int
-     max_batch_size_dpo: int = -1
-     min_batch_size: int
-
-     def __init__(self, **data: Any) -> None:
-         super().__init__(**data)
-         if self.max_batch_size_dpo == -1:
-             half_max = self.max_batch_size // 2
-             rounded_half_max = (half_max // 8) * 8
-             self.max_batch_size_dpo = max(self.min_batch_size, rounded_half_max)
-
-
- class FinetuneLoraTrainingLimits(FinetuneFullTrainingLimits):
-     max_rank: int
-     target_modules: List[str]
-
-
- class FinetuneTrainingLimits(BaseModel):
-     max_num_epochs: int
-     max_learning_rate: float
-     min_learning_rate: float
-     full_training: FinetuneFullTrainingLimits | None = None
-     lora_training: FinetuneLoraTrainingLimits | None = None
-     supports_vision: bool = False
-
-
- class LinearLRSchedulerArgs(BaseModel):
-     min_lr_ratio: float | None = 0.0
-
-
- class CosineLRSchedulerArgs(BaseModel):
-     min_lr_ratio: float | None = 0.0
-     num_cycles: float | None = 0.5
-
-
- class FinetuneLRScheduler(BaseModel):
-     lr_scheduler_type: str
-
-
- class LinearLRScheduler(FinetuneLRScheduler):
-     lr_scheduler_type: Literal["linear"] = "linear"
-     lr_scheduler_args: LinearLRSchedulerArgs | None = None
-
-
- class CosineLRScheduler(FinetuneLRScheduler):
-     lr_scheduler_type: Literal["cosine"] = "cosine"
-     lr_scheduler_args: CosineLRSchedulerArgs | None = None
-
-
- # placeholder for old fine-tuning jobs with no lr_scheduler_type specified
- class EmptyLRScheduler(FinetuneLRScheduler):
-     lr_scheduler_type: Literal[""]
-     lr_scheduler_args: None = None
-
-
- class FinetuneCheckpoint(BaseModel):
-     """
-     Fine-tuning checkpoint information
-     """
-
-     # checkpoint type (e.g. "Intermediate", "Final", "Final Merged", "Final Adapter")
-     type: str
-     # timestamp when the checkpoint was created
-     timestamp: str
-     # checkpoint name/identifier
-     name: str
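One detail worth noting in the deletion above: FinetuneFullTrainingLimits derived a default DPO batch size when max_batch_size_dpo was left at -1, taking half of max_batch_size, rounding down to a multiple of 8, and never going below min_batch_size. A standalone restatement of that arithmetic for illustration (not an import from the package):

    # Re-statement of the removed default-DPO-batch-size logic shown above.
    def default_dpo_batch_size(max_batch_size: int, min_batch_size: int) -> int:
        rounded_half_max = ((max_batch_size // 2) // 8) * 8
        return max(min_batch_size, rounded_half_max)

    assert default_dpo_batch_size(96, 8) == 48  # 96 // 2 = 48, already a multiple of 8
    assert default_dpo_batch_size(12, 8) == 8   # 6 rounds down to 0, floored at min_batch_size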
together/types/images.py DELETED
@@ -1,42 +0,0 @@
- from __future__ import annotations
-
- from typing import List, Literal
-
- from together.types.abstract import BaseModel
-
-
- class ImageRequest(BaseModel):
-     # input or list of inputs
-     prompt: str
-     # model to query
-     model: str
-     # seed
-     seed: int | None = None
-     # number of results to return
-     n: int | None = 1
-     # pixel height
-     height: int | None = 1024
-     # pixel width
-     width: int | None = 1024
-     # negative prompt
-     negative_prompt: str | None = None
-
-
- class ImageChoicesData(BaseModel):
-     # response index
-     index: int
-     # base64 image response
-     b64_json: str | None = None
-     # URL hosting image
-     url: str | None = None
-
-
- class ImageResponse(BaseModel):
-     # job id
-     id: str | None = None
-     # query model
-     model: str | None = None
-     # object type
-     object: Literal["list"] | None = None
-     # list of embedding choices
-     data: List[ImageChoicesData] | None = None
together/types/models.py DELETED
@@ -1,96 +0,0 @@
- from __future__ import annotations
-
- from enum import Enum
- from typing import Any, Dict, Literal, Optional
-
- from together.types.abstract import BaseModel
- from together.types.common import ObjectType
-
-
- class ModelType(str, Enum):
-     CHAT = "chat"
-     LANGUAGE = "language"
-     CODE = "code"
-     IMAGE = "image"
-     EMBEDDING = "embedding"
-     MODERATION = "moderation"
-     RERANK = "rerank"
-     AUDIO = "audio"
-     TRANSCRIBE = "transcribe"
-     VIDEO = "video"
-
-
- class PricingObject(BaseModel):
-     input: float | None = None
-     output: float | None = None
-     hourly: float | None = None
-     base: float | None = None
-     finetune: float | None = None
-
-
- class ModelObject(BaseModel):
-     # model id
-     id: str
-     # object type
-     object: Literal[ObjectType.Model]
-     created: int | None = None
-     # model type
-     type: ModelType | None = None
-     # pretty name
-     display_name: str | None = None
-     # model creator organization
-     organization: str | None = None
-     # link to model resource
-     link: str | None = None
-     license: str | None = None
-     context_length: int | None = None
-     pricing: PricingObject
-
-
- class ModelUploadRequest(BaseModel):
-     model_name: str
-     model_source: str
-     model_type: Literal["model", "adapter"] = "model"
-     hf_token: Optional[str] = None
-     description: Optional[str] = None
-     base_model: Optional[str] = None
-     lora_model: Optional[str] = None
-
-
- class ModelUploadResponse(BaseModel):
-     job_id: Optional[str] = None
-     model_name: Optional[str] = None
-     model_id: Optional[str] = None
-     model_source: Optional[str] = None
-     message: str
-
-     @classmethod
-     def from_api_response(cls, response_data: Dict[str, Any]) -> "ModelUploadResponse":
-         """Create ModelUploadResponse from API response, handling both flat and nested structures"""
-         # Start with the base response
-         result: Dict[str, Any] = {"message": response_data.get("message", "")}
-
-         # Check if we have nested data
-         if "data" in response_data and response_data["data"] is not None:
-             # Use nested data values
-             nested_data = response_data["data"]
-             result.update(
-                 {
-                     "job_id": nested_data.get("job_id"),
-                     "model_name": nested_data.get("model_name"),
-                     "model_id": nested_data.get("model_id"),
-                     "model_source": nested_data.get("model_source"),
-                 }
-             )
-         else:
-             # Use top-level values
-             result.update(
-                 {
-                     "job_id": response_data.get("job_id"),
-                     "model_name": response_data.get("model_name"),
-                     "model_id": response_data.get("model_id"),
-                     "model_source": response_data.get("model_source"),
-                 }
-             )
-
-         return cls(**result)
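The removed ModelUploadResponse.from_api_response above normalized two response shapes: a flat payload and one nested under a "data" key. A minimal usage sketch, assuming together==1.5.35 is installed and using made-up placeholder values:

    from together.types.models import ModelUploadResponse

    flat = {"message": "accepted", "job_id": "job-1", "model_name": "my-model"}
    nested = {"message": "accepted", "data": {"job_id": "job-1", "model_name": "my-model"}}

    # Both shapes resolve to the same fields.
    for payload in (flat, nested):
        resp = ModelUploadResponse.from_api_response(payload)
        assert resp.job_id == "job-1"
        assert resp.model_name == "my-model"
        assert resp.message == "accepted"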
together/types/rerank.py DELETED
@@ -1,43 +0,0 @@
- from __future__ import annotations
-
- from typing import List, Literal, Dict, Any
-
- from together.types.abstract import BaseModel
- from together.types.common import UsageData
-
-
- class RerankRequest(BaseModel):
-     # model to query
-     model: str
-     # input or list of inputs
-     query: str
-     # list of documents
-     documents: List[str] | List[Dict[str, Any]]
-     # return top_n results
-     top_n: int | None = None
-     # boolean to return documents
-     return_documents: bool = False
-     # field selector for documents
-     rank_fields: List[str] | None = None
-
-
- class RerankChoicesData(BaseModel):
-     # response index
-     index: int
-     # object type
-     relevance_score: float
-     # rerank response
-     document: Dict[str, Any] | None = None
-
-
- class RerankResponse(BaseModel):
-     # job id
-     id: str | None = None
-     # object type
-     object: Literal["rerank"] | None = None
-     # query model
-     model: str | None = None
-     # list of reranked results
-     results: List[RerankChoicesData] | None = None
-     # usage stats
-     usage: UsageData | None = None
together/types/videos.py DELETED
@@ -1,69 +0,0 @@
- from __future__ import annotations
-
- from typing import Any, Dict, List, Literal, Optional
-
- from together.types.abstract import BaseModel
-
-
- class CreateVideoBody(BaseModel):
-     """Request model for video creation"""
-
-     # Required parameters
-     model: str
-
-     # Optional dimension parameters
-     prompt: str | None = None
-     height: int | None = None
-     width: int | None = None
-
-     # Optional generation parameters
-     seconds: str | None = None  # Min 1 max 10
-     fps: int | None = None  # Frames per second, min 15 max 60, default 24
-     steps: int | None = None  # Denoising steps, min 10 max 50, default 20
-     seed: int | None = None
-     guidance_scale: float | None = None  # Default 8, recommended 6.0-10.0
-     output_format: Literal["MP4", "WEBM"] | None = (
-         None  # "MP4" or "WEBM", default "MP4"
-     )
-     output_quality: int | None = None  # Compression quality, default 20
-     negative_prompt: str | None = None
-
-     # Advanced parameters
-     frame_images: List[Dict[str, Any]] | None = None  # Array of keyframe images
-     reference_images: List[str] | None = None  # Array of reference images
-
-
- class VideoOutputs(BaseModel):
-     """Artifacts generated from video creation job"""
-
-     cost: float
-     video_url: str
-
-
- class Error(BaseModel):
-     """Error information about the video job"""
-
-     code: str | None = None
-     message: str
-
-
- class CreateVideoResponse(BaseModel):
-     """Response from video generation request"""
-
-     id: str
-
-
- class VideoJob(BaseModel):
-     """Structured information describing a generated video job."""
-
-     id: str
-     model: str
-     object: Literal["video"]
-     status: Literal["queued", "in_progress", "completed", "failed", "cancelled"]
-     seconds: str
-     size: str
-     created_at: int
-
-     error: Error | None = None
-     outputs: VideoOutputs | None = None
-     completed_at: int | None = None