together 1.5.17__py3-none-any.whl → 2.0.0a8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (205) hide show
  1. together/__init__.py +101 -63
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +108 -75
  30. together/lib/cli/api/evals.py +588 -0
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +161 -120
  33. together/lib/cli/api/models.py +140 -0
  34. together/{cli → lib/cli}/api/utils.py +6 -7
  35. together/{cli → lib/cli}/cli.py +16 -24
  36. together/{constants.py → lib/constants.py} +17 -12
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +455 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/lib/utils/files.py +628 -0
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -33
  50. together/resources/audio/__init__.py +72 -21
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -122
  53. together/resources/audio/transcriptions.py +282 -0
  54. together/resources/audio/translations.py +256 -0
  55. together/resources/audio/voices.py +135 -0
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +598 -395
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +398 -121
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +256 -108
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +238 -90
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +374 -0
  75. together/types/__init__.py +65 -109
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_job.py +57 -0
  157. together-2.0.0a8.dist-info/METADATA +680 -0
  158. together-2.0.0a8.dist-info/RECORD +164 -0
  159. {together-1.5.17.dist-info → together-2.0.0a8.dist-info}/WHEEL +1 -1
  160. together-2.0.0a8.dist-info/entry_points.txt +2 -0
  161. {together-1.5.17.dist-info → together-2.0.0a8.dist-info/licenses}/LICENSE +1 -1
  162. together/abstract/api_requestor.py +0 -729
  163. together/cli/api/chat.py +0 -276
  164. together/cli/api/completions.py +0 -119
  165. together/cli/api/images.py +0 -93
  166. together/cli/api/models.py +0 -55
  167. together/client.py +0 -176
  168. together/error.py +0 -194
  169. together/filemanager.py +0 -389
  170. together/legacy/__init__.py +0 -0
  171. together/legacy/base.py +0 -27
  172. together/legacy/complete.py +0 -93
  173. together/legacy/embeddings.py +0 -27
  174. together/legacy/files.py +0 -146
  175. together/legacy/finetune.py +0 -177
  176. together/legacy/images.py +0 -27
  177. together/legacy/models.py +0 -44
  178. together/resources/batch.py +0 -136
  179. together/resources/code_interpreter.py +0 -82
  180. together/resources/finetune.py +0 -1064
  181. together/together_response.py +0 -50
  182. together/types/abstract.py +0 -26
  183. together/types/audio_speech.py +0 -110
  184. together/types/batch.py +0 -53
  185. together/types/chat_completions.py +0 -197
  186. together/types/code_interpreter.py +0 -57
  187. together/types/common.py +0 -66
  188. together/types/completions.py +0 -107
  189. together/types/embeddings.py +0 -35
  190. together/types/endpoints.py +0 -123
  191. together/types/error.py +0 -16
  192. together/types/files.py +0 -90
  193. together/types/finetune.py +0 -398
  194. together/types/images.py +0 -44
  195. together/types/models.py +0 -45
  196. together/types/rerank.py +0 -43
  197. together/utils/api_helpers.py +0 -124
  198. together/utils/files.py +0 -425
  199. together/version.py +0 -6
  200. together-1.5.17.dist-info/METADATA +0 -525
  201. together-1.5.17.dist-info/RECORD +0 -69
  202. together-1.5.17.dist-info/entry_points.txt +0 -3
  203. /together/{abstract → lib/cli}/__init__.py +0 -0
  204. /together/{cli → lib/cli/api}/__init__.py +0 -0
  205. /together/{cli/api/__init__.py → py.typed} +0 -0
@@ -1,1064 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import re
4
- from pathlib import Path
5
- from typing import List, Dict, Literal
6
-
7
- from rich import print as rprint
8
-
9
- from together.abstract import api_requestor
10
- from together.filemanager import DownloadManager
11
- from together.together_response import TogetherResponse
12
- from together.types import (
13
- CosineLRScheduler,
14
- CosineLRSchedulerArgs,
15
- FinetuneCheckpoint,
16
- FinetuneDownloadResult,
17
- FinetuneList,
18
- FinetuneListEvents,
19
- FinetuneLRScheduler,
20
- FinetuneRequest,
21
- FinetuneResponse,
22
- FinetuneTrainingLimits,
23
- FullTrainingType,
24
- LinearLRScheduler,
25
- LinearLRSchedulerArgs,
26
- LoRATrainingType,
27
- TogetherClient,
28
- TogetherRequest,
29
- TrainingMethodDPO,
30
- TrainingMethodSFT,
31
- TrainingType,
32
- )
33
- from together.types.finetune import DownloadCheckpointType
34
- from together.utils import log_warn_once, normalize_key
35
-
36
-
37
- _FT_JOB_WITH_STEP_REGEX = r"^ft-[\dabcdef-]+:\d+$"
38
-
39
-
40
- AVAILABLE_TRAINING_METHODS = {
41
- TrainingMethodSFT().method,
42
- TrainingMethodDPO().method,
43
- }
44
-
45
-
46
- def create_finetune_request(
47
- model_limits: FinetuneTrainingLimits,
48
- training_file: str,
49
- model: str | None = None,
50
- n_epochs: int = 1,
51
- validation_file: str | None = "",
52
- n_evals: int | None = 0,
53
- n_checkpoints: int | None = 1,
54
- batch_size: int | Literal["max"] = "max",
55
- learning_rate: float | None = 0.00001,
56
- lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
57
- min_lr_ratio: float = 0.0,
58
- scheduler_num_cycles: float = 0.5,
59
- warmup_ratio: float | None = None,
60
- max_grad_norm: float = 1.0,
61
- weight_decay: float = 0.0,
62
- lora: bool = False,
63
- lora_r: int | None = None,
64
- lora_dropout: float | None = 0,
65
- lora_alpha: float | None = None,
66
- lora_trainable_modules: str | None = "all-linear",
67
- suffix: str | None = None,
68
- wandb_api_key: str | None = None,
69
- wandb_base_url: str | None = None,
70
- wandb_project_name: str | None = None,
71
- wandb_name: str | None = None,
72
- train_on_inputs: bool | Literal["auto"] | None = None,
73
- training_method: str = "sft",
74
- dpo_beta: float | None = None,
75
- dpo_normalize_logratios_by_length: bool = False,
76
- rpo_alpha: float | None = None,
77
- simpo_gamma: float | None = None,
78
- from_checkpoint: str | None = None,
79
- ) -> FinetuneRequest:
80
- if model is not None and from_checkpoint is not None:
81
- raise ValueError(
82
- "You must specify either a model or a checkpoint to start a job from, not both"
83
- )
84
-
85
- if model is None and from_checkpoint is None:
86
- raise ValueError("You must specify either a model or a checkpoint")
87
-
88
- model_or_checkpoint = model or from_checkpoint
89
-
90
- if batch_size == "max":
91
- log_warn_once(
92
- "Starting from together>=1.3.0, "
93
- "the default batch size is set to the maximum allowed value for each model."
94
- )
95
- if warmup_ratio is None:
96
- warmup_ratio = 0.0
97
-
98
- training_type: TrainingType = FullTrainingType()
99
- max_batch_size: int = 0
100
- max_batch_size_dpo: int = 0
101
- min_batch_size: int = 0
102
- if lora:
103
- if model_limits.lora_training is None:
104
- raise ValueError(
105
- f"LoRA adapters are not supported for the selected model ({model_or_checkpoint})."
106
- )
107
-
108
- if lora_dropout is not None:
109
- if not 0 <= lora_dropout < 1.0:
110
- raise ValueError("LoRA dropout must be in [0, 1) range.")
111
-
112
- lora_r = lora_r if lora_r is not None else model_limits.lora_training.max_rank
113
- lora_alpha = lora_alpha if lora_alpha is not None else lora_r * 2
114
- training_type = LoRATrainingType(
115
- lora_r=lora_r,
116
- lora_alpha=lora_alpha,
117
- lora_dropout=lora_dropout,
118
- lora_trainable_modules=lora_trainable_modules,
119
- )
120
-
121
- max_batch_size = model_limits.lora_training.max_batch_size
122
- min_batch_size = model_limits.lora_training.min_batch_size
123
- max_batch_size_dpo = model_limits.lora_training.max_batch_size_dpo
124
- else:
125
- if model_limits.full_training is None:
126
- raise ValueError(
127
- f"Full training is not supported for the selected model ({model_or_checkpoint})."
128
- )
129
-
130
- max_batch_size = model_limits.full_training.max_batch_size
131
- min_batch_size = model_limits.full_training.min_batch_size
132
- max_batch_size_dpo = model_limits.full_training.max_batch_size_dpo
133
-
134
- if batch_size == "max":
135
- if training_method == "dpo":
136
- batch_size = max_batch_size_dpo
137
- else:
138
- batch_size = max_batch_size
139
-
140
- if training_method == "sft":
141
- if batch_size > max_batch_size:
142
- raise ValueError(
143
- f"Requested batch size of {batch_size} is higher that the maximum allowed value of {max_batch_size}."
144
- )
145
- elif training_method == "dpo":
146
- if batch_size > max_batch_size_dpo:
147
- raise ValueError(
148
- f"Requested batch size of {batch_size} is higher that the maximum allowed value of {max_batch_size_dpo}."
149
- )
150
-
151
- if batch_size < min_batch_size:
152
- raise ValueError(
153
- f"Requested batch size of {batch_size} is lower that the minimum allowed value of {min_batch_size}."
154
- )
155
-
156
- if warmup_ratio > 1 or warmup_ratio < 0:
157
- raise ValueError(f"Warmup ratio should be between 0 and 1 (got {warmup_ratio})")
158
-
159
- if min_lr_ratio is not None and (min_lr_ratio > 1 or min_lr_ratio < 0):
160
- raise ValueError(
161
- f"Min learning rate ratio should be between 0 and 1 (got {min_lr_ratio})"
162
- )
163
-
164
- if max_grad_norm < 0:
165
- raise ValueError(
166
- f"Max gradient norm should be non-negative (got {max_grad_norm})"
167
- )
168
-
169
- if weight_decay is not None and (weight_decay < 0):
170
- raise ValueError(f"Weight decay should be non-negative (got {weight_decay})")
171
-
172
- if training_method not in AVAILABLE_TRAINING_METHODS:
173
- raise ValueError(
174
- f"training_method must be one of {', '.join(AVAILABLE_TRAINING_METHODS)}"
175
- )
176
-
177
- if train_on_inputs is not None and training_method != "sft":
178
- raise ValueError("train_on_inputs is only supported for SFT training")
179
-
180
- if train_on_inputs is None and training_method == "sft":
181
- log_warn_once(
182
- "train_on_inputs is not set for SFT training, it will be set to 'auto'"
183
- )
184
- train_on_inputs = "auto"
185
-
186
- if dpo_beta is not None and training_method != "dpo":
187
- raise ValueError("dpo_beta is only supported for DPO training")
188
- if dpo_normalize_logratios_by_length and training_method != "dpo":
189
- raise ValueError(
190
- "dpo_normalize_logratios_by_length=True is only supported for DPO training"
191
- )
192
- if rpo_alpha is not None:
193
- if training_method != "dpo":
194
- raise ValueError("rpo_alpha is only supported for DPO training")
195
- if not rpo_alpha >= 0.0:
196
- raise ValueError(f"rpo_alpha should be non-negative (got {rpo_alpha})")
197
-
198
- if simpo_gamma is not None:
199
- if training_method != "dpo":
200
- raise ValueError("simpo_gamma is only supported for DPO training")
201
- if not simpo_gamma >= 0.0:
202
- raise ValueError(f"simpo_gamma should be non-negative (got {simpo_gamma})")
203
-
204
- lr_scheduler: FinetuneLRScheduler
205
- if lr_scheduler_type == "cosine":
206
- if scheduler_num_cycles <= 0.0:
207
- raise ValueError(
208
- f"Number of cycles should be greater than 0 (got {scheduler_num_cycles})"
209
- )
210
-
211
- lr_scheduler = CosineLRScheduler(
212
- lr_scheduler_args=CosineLRSchedulerArgs(
213
- min_lr_ratio=min_lr_ratio, num_cycles=scheduler_num_cycles
214
- ),
215
- )
216
- else:
217
- lr_scheduler = LinearLRScheduler(
218
- lr_scheduler_args=LinearLRSchedulerArgs(min_lr_ratio=min_lr_ratio),
219
- )
220
-
221
- training_method_cls: TrainingMethodSFT | TrainingMethodDPO
222
- if training_method == "sft":
223
- training_method_cls = TrainingMethodSFT(train_on_inputs=train_on_inputs)
224
- elif training_method == "dpo":
225
- if simpo_gamma is not None and simpo_gamma > 0:
226
- dpo_reference_free = True
227
- dpo_normalize_logratios_by_length = True
228
- rprint(
229
- f"Parameter simpo_gamma was set to {simpo_gamma}. "
230
- "SimPO training detected. Reference logits will not be used "
231
- "and length normalization of log-probabilities will be enabled."
232
- )
233
- else:
234
- dpo_reference_free = False
235
-
236
- training_method_cls = TrainingMethodDPO(
237
- dpo_beta=dpo_beta,
238
- dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
239
- dpo_reference_free=dpo_reference_free,
240
- rpo_alpha=rpo_alpha,
241
- simpo_gamma=simpo_gamma,
242
- )
243
-
244
- finetune_request = FinetuneRequest(
245
- model=model,
246
- training_file=training_file,
247
- validation_file=validation_file,
248
- n_epochs=n_epochs,
249
- n_evals=n_evals,
250
- n_checkpoints=n_checkpoints,
251
- batch_size=batch_size,
252
- learning_rate=learning_rate,
253
- lr_scheduler=lr_scheduler,
254
- warmup_ratio=warmup_ratio,
255
- max_grad_norm=max_grad_norm,
256
- weight_decay=weight_decay,
257
- training_type=training_type,
258
- suffix=suffix,
259
- wandb_key=wandb_api_key,
260
- wandb_base_url=wandb_base_url,
261
- wandb_project_name=wandb_project_name,
262
- wandb_name=wandb_name,
263
- training_method=training_method_cls,
264
- from_checkpoint=from_checkpoint,
265
- )
266
-
267
- return finetune_request
268
-
269
-
270
- def _parse_raw_checkpoints(
271
- checkpoints: List[Dict[str, str]], id: str
272
- ) -> List[FinetuneCheckpoint]:
273
- """
274
- Helper function to process raw checkpoints and create checkpoint list.
275
-
276
- Args:
277
- checkpoints (List[Dict[str, str]]): List of raw checkpoints metadata
278
- id (str): Fine-tune job ID
279
-
280
- Returns:
281
- List[FinetuneCheckpoint]: List of available checkpoints
282
- """
283
-
284
- parsed_checkpoints = []
285
- for checkpoint in checkpoints:
286
- step = checkpoint["step"]
287
- checkpoint_type = checkpoint["checkpoint_type"]
288
- checkpoint_name = (
289
- f"{id}:{step}" if "intermediate" in checkpoint_type.lower() else id
290
- )
291
-
292
- parsed_checkpoints.append(
293
- FinetuneCheckpoint(
294
- type=checkpoint_type,
295
- timestamp=checkpoint["created_at"],
296
- name=checkpoint_name,
297
- )
298
- )
299
-
300
- parsed_checkpoints.sort(key=lambda x: x.timestamp, reverse=True)
301
- return parsed_checkpoints
302
-
303
-
304
- class FineTuning:
305
- def __init__(self, client: TogetherClient) -> None:
306
- self._client = client
307
-
308
- def create(
309
- self,
310
- *,
311
- training_file: str,
312
- model: str | None = None,
313
- n_epochs: int = 1,
314
- validation_file: str | None = "",
315
- n_evals: int | None = 0,
316
- n_checkpoints: int | None = 1,
317
- batch_size: int | Literal["max"] = "max",
318
- learning_rate: float | None = 0.00001,
319
- lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
320
- min_lr_ratio: float = 0.0,
321
- scheduler_num_cycles: float = 0.5,
322
- warmup_ratio: float = 0.0,
323
- max_grad_norm: float = 1.0,
324
- weight_decay: float = 0.0,
325
- lora: bool = True,
326
- lora_r: int | None = None,
327
- lora_dropout: float | None = 0,
328
- lora_alpha: float | None = None,
329
- lora_trainable_modules: str | None = "all-linear",
330
- suffix: str | None = None,
331
- wandb_api_key: str | None = None,
332
- wandb_base_url: str | None = None,
333
- wandb_project_name: str | None = None,
334
- wandb_name: str | None = None,
335
- verbose: bool = False,
336
- model_limits: FinetuneTrainingLimits | None = None,
337
- train_on_inputs: bool | Literal["auto"] | None = None,
338
- training_method: str = "sft",
339
- dpo_beta: float | None = None,
340
- dpo_normalize_logratios_by_length: bool = False,
341
- rpo_alpha: float | None = None,
342
- simpo_gamma: float | None = None,
343
- from_checkpoint: str | None = None,
344
- ) -> FinetuneResponse:
345
- """
346
- Method to initiate a fine-tuning job
347
-
348
- Args:
349
- training_file (str): File-ID of a file uploaded to the Together API
350
- model (str, optional): Name of the base model to run fine-tune job on
351
- n_epochs (int, optional): Number of epochs for fine-tuning. Defaults to 1.
352
- validation file (str, optional): File ID of a file uploaded to the Together API for validation.
353
- n_evals (int, optional): Number of evaluation loops to run. Defaults to 0.
354
- n_checkpoints (int, optional): Number of checkpoints to save during fine-tuning.
355
- Defaults to 1.
356
- batch_size (int or "max"): Batch size for fine-tuning. Defaults to max.
357
- learning_rate (float, optional): Learning rate multiplier to use for training
358
- Defaults to 0.00001.
359
- lr_scheduler_type (Literal["linear", "cosine"]): Learning rate scheduler type. Defaults to "cosine".
360
- min_lr_ratio (float, optional): Min learning rate ratio of the initial learning rate for
361
- the learning rate scheduler. Defaults to 0.0.
362
- scheduler_num_cycles (float, optional): Number or fraction of cycles for the cosine learning rate scheduler. Defaults to 0.5.
363
- warmup_ratio (float, optional): Warmup ratio for the learning rate scheduler.
364
- max_grad_norm (float, optional): Max gradient norm. Defaults to 1.0, set to 0 to disable.
365
- weight_decay (float, optional): Weight decay. Defaults to 0.0.
366
- lora (bool, optional): Whether to use LoRA adapters. Defaults to True.
367
- lora_r (int, optional): Rank of LoRA adapters. Defaults to 8.
368
- lora_dropout (float, optional): Dropout rate for LoRA adapters. Defaults to 0.
369
- lora_alpha (float, optional): Alpha for LoRA adapters. Defaults to 8.
370
- lora_trainable_modules (str, optional): Trainable modules for LoRA adapters. Defaults to "all-linear".
371
- suffix (str, optional): Up to 40 character suffix that will be added to your fine-tuned model name.
372
- Defaults to None.
373
- wandb_api_key (str, optional): API key for Weights & Biases integration.
374
- Defaults to None.
375
- wandb_base_url (str, optional): Base URL for Weights & Biases integration.
376
- Defaults to None.
377
- wandb_project_name (str, optional): Project name for Weights & Biases integration.
378
- Defaults to None.
379
- wandb_name (str, optional): Run name for Weights & Biases integration.
380
- Defaults to None.
381
- verbose (bool, optional): whether to print the job parameters before submitting a request.
382
- Defaults to False.
383
- model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
384
- Defaults to None.
385
- train_on_inputs (bool or "auto", optional): Whether to mask the user messages in conversational data or prompts in instruction data.
386
- "auto" will automatically determine whether to mask the inputs based on the data format.
387
- For datasets with the "text" field (general format), inputs will not be masked.
388
- For datasets with the "messages" field (conversational format) or "prompt" and "completion" fields
389
- (Instruction format), inputs will be masked.
390
- Defaults to None, or "auto" if training_method is "sft" (set in create_finetune_request).
391
- training_method (str, optional): Training method. Defaults to "sft".
392
- Supported methods: "sft", "dpo".
393
- dpo_beta (float, optional): DPO beta parameter. Defaults to None.
394
- dpo_normalize_logratios_by_length (bool): Whether or not normalize logratios by sample length. Defaults to False,
395
- rpo_alpha (float, optional): RPO alpha parameter of DPO training to include NLL in the loss. Defaults to None.
396
- simpo_gamma: (float, optional): SimPO gamma parameter. Defaults to None.
397
- from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
398
- The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
399
- The step value is optional, without it the final checkpoint will be used.
400
-
401
- Returns:
402
- FinetuneResponse: Object containing information about fine-tuning job.
403
- """
404
-
405
- requestor = api_requestor.APIRequestor(
406
- client=self._client,
407
- )
408
-
409
- if model_limits is None:
410
- # mypy doesn't understand that model or from_checkpoint is not None
411
- if model is not None:
412
- model_name = model
413
- elif from_checkpoint is not None:
414
- model_name = from_checkpoint.split(":")[0]
415
- else:
416
- # this branch is unreachable, but mypy doesn't know that
417
- pass
418
- model_limits = self.get_model_limits(model=model_name)
419
-
420
- finetune_request = create_finetune_request(
421
- model_limits=model_limits,
422
- training_file=training_file,
423
- model=model,
424
- n_epochs=n_epochs,
425
- validation_file=validation_file,
426
- n_evals=n_evals,
427
- n_checkpoints=n_checkpoints,
428
- batch_size=batch_size,
429
- learning_rate=learning_rate,
430
- lr_scheduler_type=lr_scheduler_type,
431
- min_lr_ratio=min_lr_ratio,
432
- scheduler_num_cycles=scheduler_num_cycles,
433
- warmup_ratio=warmup_ratio,
434
- max_grad_norm=max_grad_norm,
435
- weight_decay=weight_decay,
436
- lora=lora,
437
- lora_r=lora_r,
438
- lora_dropout=lora_dropout,
439
- lora_alpha=lora_alpha,
440
- lora_trainable_modules=lora_trainable_modules,
441
- suffix=suffix,
442
- wandb_api_key=wandb_api_key,
443
- wandb_base_url=wandb_base_url,
444
- wandb_project_name=wandb_project_name,
445
- wandb_name=wandb_name,
446
- train_on_inputs=train_on_inputs,
447
- training_method=training_method,
448
- dpo_beta=dpo_beta,
449
- dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
450
- rpo_alpha=rpo_alpha,
451
- simpo_gamma=simpo_gamma,
452
- from_checkpoint=from_checkpoint,
453
- )
454
-
455
- if verbose:
456
- rprint(
457
- "Submitting a fine-tuning job with the following parameters:",
458
- finetune_request,
459
- )
460
- parameter_payload = finetune_request.model_dump(exclude_none=True)
461
-
462
- response, _, _ = requestor.request(
463
- options=TogetherRequest(
464
- method="POST",
465
- url="fine-tunes",
466
- params=parameter_payload,
467
- ),
468
- stream=False,
469
- )
470
- assert isinstance(response, TogetherResponse)
471
-
472
- return FinetuneResponse(**response.data)
473
-
474
- def list(self) -> FinetuneList:
475
- """
476
- Lists fine-tune job history
477
-
478
- Returns:
479
- FinetuneList: Object containing a list of fine-tune jobs
480
- """
481
-
482
- requestor = api_requestor.APIRequestor(
483
- client=self._client,
484
- )
485
-
486
- response, _, _ = requestor.request(
487
- options=TogetherRequest(
488
- method="GET",
489
- url="fine-tunes",
490
- ),
491
- stream=False,
492
- )
493
-
494
- assert isinstance(response, TogetherResponse)
495
-
496
- return FinetuneList(**response.data)
497
-
498
- def retrieve(self, id: str) -> FinetuneResponse:
499
- """
500
- Retrieves fine-tune job details
501
-
502
- Args:
503
- id (str): Fine-tune ID to retrieve. A string that starts with `ft-`.
504
-
505
- Returns:
506
- FinetuneResponse: Object containing information about fine-tuning job.
507
- """
508
-
509
- requestor = api_requestor.APIRequestor(
510
- client=self._client,
511
- )
512
-
513
- response, _, _ = requestor.request(
514
- options=TogetherRequest(
515
- method="GET",
516
- url=f"fine-tunes/{id}",
517
- ),
518
- stream=False,
519
- )
520
-
521
- assert isinstance(response, TogetherResponse)
522
-
523
- return FinetuneResponse(**response.data)
524
-
525
- def cancel(self, id: str) -> FinetuneResponse:
526
- """
527
- Method to cancel a running fine-tuning job
528
-
529
- Args:
530
- id (str): Fine-tune ID to cancel. A string that starts with `ft-`.
531
-
532
- Returns:
533
- FinetuneResponse: Object containing information about cancelled fine-tuning job.
534
- """
535
-
536
- requestor = api_requestor.APIRequestor(
537
- client=self._client,
538
- )
539
-
540
- response, _, _ = requestor.request(
541
- options=TogetherRequest(
542
- method="POST",
543
- url=f"fine-tunes/{id}/cancel",
544
- ),
545
- stream=False,
546
- )
547
-
548
- assert isinstance(response, TogetherResponse)
549
-
550
- return FinetuneResponse(**response.data)
551
-
552
- def list_events(self, id: str) -> FinetuneListEvents:
553
- """
554
- Lists events of a fine-tune job
555
-
556
- Args:
557
- id (str): Fine-tune ID to list events for. A string that starts with `ft-`.
558
-
559
- Returns:
560
- FinetuneListEvents: Object containing list of fine-tune events
561
- """
562
-
563
- requestor = api_requestor.APIRequestor(
564
- client=self._client,
565
- )
566
-
567
- response, _, _ = requestor.request(
568
- options=TogetherRequest(
569
- method="GET",
570
- url=f"fine-tunes/{id}/events",
571
- ),
572
- stream=False,
573
- )
574
- assert isinstance(response, TogetherResponse)
575
-
576
- return FinetuneListEvents(**response.data)
577
-
578
- def list_checkpoints(self, id: str) -> List[FinetuneCheckpoint]:
579
- """
580
- List available checkpoints for a fine-tuning job
581
-
582
- Args:
583
- id (str): Unique identifier of the fine-tune job to list checkpoints for
584
-
585
- Returns:
586
- List[FinetuneCheckpoint]: List of available checkpoints
587
- """
588
- requestor = api_requestor.APIRequestor(
589
- client=self._client,
590
- )
591
-
592
- response, _, _ = requestor.request(
593
- options=TogetherRequest(
594
- method="GET",
595
- url=f"fine-tunes/{id}/checkpoints",
596
- ),
597
- stream=False,
598
- )
599
- assert isinstance(response, TogetherResponse)
600
-
601
- raw_checkpoints = response.data["data"]
602
- return _parse_raw_checkpoints(raw_checkpoints, id)
603
-
604
- def download(
605
- self,
606
- id: str,
607
- *,
608
- output: Path | str | None = None,
609
- checkpoint_step: int | None = None,
610
- checkpoint_type: DownloadCheckpointType | str = DownloadCheckpointType.DEFAULT,
611
- ) -> FinetuneDownloadResult:
612
- """
613
- Downloads compressed fine-tuned model or checkpoint to local disk.
614
-
615
- Defaults file location to `$PWD/{model_name}.{extension}`
616
-
617
- Args:
618
- id (str): Fine-tune ID to download. A string that starts with `ft-`.
619
- output (pathlib.Path | str, optional): Specifies output file name for downloaded model.
620
- Defaults to None.
621
- checkpoint_step (int, optional): Specifies step number for checkpoint to download.
622
- Defaults to -1 (download the final model)
623
- checkpoint_type (CheckpointType | str, optional): Specifies which checkpoint to download.
624
- Defaults to CheckpointType.DEFAULT.
625
-
626
- Returns:
627
- FinetuneDownloadResult: Object containing downloaded model metadata
628
- """
629
-
630
- if re.match(_FT_JOB_WITH_STEP_REGEX, id) is not None:
631
- if checkpoint_step is None:
632
- checkpoint_step = int(id.split(":")[1])
633
- id = id.split(":")[0]
634
- else:
635
- raise ValueError(
636
- "Fine-tuning job ID {id} contains a colon to specify the step to download, but `checkpoint_step` "
637
- "was also set. Remove one of the step specifiers to proceed."
638
- )
639
-
640
- url = f"finetune/download?ft_id={id}"
641
-
642
- if checkpoint_step is not None:
643
- url += f"&checkpoint_step={checkpoint_step}"
644
-
645
- ft_job = self.retrieve(id)
646
-
647
- # convert str to DownloadCheckpointType
648
- if isinstance(checkpoint_type, str):
649
- try:
650
- checkpoint_type = DownloadCheckpointType(checkpoint_type.lower())
651
- except ValueError:
652
- enum_strs = ", ".join(e.value for e in DownloadCheckpointType)
653
- raise ValueError(
654
- f"Invalid checkpoint type: {checkpoint_type}. Choose one of {{{enum_strs}}}."
655
- )
656
-
657
- if isinstance(ft_job.training_type, FullTrainingType):
658
- if checkpoint_type != DownloadCheckpointType.DEFAULT:
659
- raise ValueError(
660
- "Only DEFAULT checkpoint type is allowed for FullTrainingType"
661
- )
662
- url += "&checkpoint=model_output_path"
663
- elif isinstance(ft_job.training_type, LoRATrainingType):
664
- if checkpoint_type == DownloadCheckpointType.DEFAULT:
665
- checkpoint_type = DownloadCheckpointType.MERGED
666
-
667
- if checkpoint_type in {
668
- DownloadCheckpointType.MERGED,
669
- DownloadCheckpointType.ADAPTER,
670
- }:
671
- url += f"&checkpoint={checkpoint_type.value}"
672
- else:
673
- raise ValueError(
674
- f"Invalid checkpoint type for LoRATrainingType: {checkpoint_type}"
675
- )
676
-
677
- remote_name = ft_job.output_name
678
-
679
- download_manager = DownloadManager(self._client)
680
-
681
- if isinstance(output, str):
682
- output = Path(output)
683
-
684
- downloaded_filename, file_size = download_manager.download(
685
- url, output, normalize_key(remote_name or id), fetch_metadata=True
686
- )
687
-
688
- return FinetuneDownloadResult(
689
- object="local",
690
- id=id,
691
- checkpoint_step=checkpoint_step,
692
- filename=downloaded_filename,
693
- size=file_size,
694
- )
695
-
696
- def get_model_limits(self, *, model: str) -> FinetuneTrainingLimits:
697
- """
698
- Requests training limits for a specific model
699
-
700
- Args:
701
- model_name (str): Name of the model to get limits for
702
-
703
- Returns:
704
- FinetuneTrainingLimits: Object containing training limits for the model
705
- """
706
-
707
- requestor = api_requestor.APIRequestor(
708
- client=self._client,
709
- )
710
-
711
- model_limits_response, _, _ = requestor.request(
712
- options=TogetherRequest(
713
- method="GET",
714
- url="fine-tunes/models/limits",
715
- params={"model_name": model},
716
- ),
717
- stream=False,
718
- )
719
-
720
- model_limits = FinetuneTrainingLimits(**model_limits_response.data)
721
-
722
- return model_limits
723
-
724
-
725
class AsyncFineTuning:
    """Asynchronous counterpart of the fine-tuning resource; issues requests via `arequest`."""

    def __init__(self, client: TogetherClient) -> None:
        # Shared Together client used by every request issued from this resource.
        self._client = client
728
-
729
- async def create(
730
- self,
731
- *,
732
- training_file: str,
733
- model: str | None = None,
734
- n_epochs: int = 1,
735
- validation_file: str | None = "",
736
- n_evals: int | None = 0,
737
- n_checkpoints: int | None = 1,
738
- batch_size: int | Literal["max"] = "max",
739
- learning_rate: float | None = 0.00001,
740
- lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
741
- min_lr_ratio: float = 0.0,
742
- scheduler_num_cycles: float = 0.5,
743
- warmup_ratio: float = 0.0,
744
- max_grad_norm: float = 1.0,
745
- weight_decay: float = 0.0,
746
- lora: bool = True,
747
- lora_r: int | None = None,
748
- lora_dropout: float | None = 0,
749
- lora_alpha: float | None = None,
750
- lora_trainable_modules: str | None = "all-linear",
751
- suffix: str | None = None,
752
- wandb_api_key: str | None = None,
753
- wandb_base_url: str | None = None,
754
- wandb_project_name: str | None = None,
755
- wandb_name: str | None = None,
756
- verbose: bool = False,
757
- model_limits: FinetuneTrainingLimits | None = None,
758
- train_on_inputs: bool | Literal["auto"] | None = None,
759
- training_method: str = "sft",
760
- dpo_beta: float | None = None,
761
- dpo_normalize_logratios_by_length: bool = False,
762
- rpo_alpha: float | None = None,
763
- simpo_gamma: float | None = None,
764
- from_checkpoint: str | None = None,
765
- ) -> FinetuneResponse:
766
- """
767
- Async method to initiate a fine-tuning job
768
-
769
- Args:
770
- training_file (str): File-ID of a file uploaded to the Together API
771
- model (str, optional): Name of the base model to run fine-tune job on
772
- n_epochs (int, optional): Number of epochs for fine-tuning. Defaults to 1.
773
- validation file (str, optional): File ID of a file uploaded to the Together API for validation.
774
- n_evals (int, optional): Number of evaluation loops to run. Defaults to 0.
775
- n_checkpoints (int, optional): Number of checkpoints to save during fine-tuning.
776
- Defaults to 1.
777
- batch_size (int, optional): Batch size for fine-tuning. Defaults to max.
778
- learning_rate (float, optional): Learning rate multiplier to use for training
779
- Defaults to 0.00001.
780
- lr_scheduler_type (Literal["linear", "cosine"]): Learning rate scheduler type. Defaults to "cosine".
781
- min_lr_ratio (float, optional): Min learning rate ratio of the initial learning rate for
782
- the learning rate scheduler. Defaults to 0.0.
783
- scheduler_num_cycles (float, optional): Number or fraction of cycles for the cosine learning rate scheduler. Defaults to 0.5.
784
- warmup_ratio (float, optional): Warmup ratio for the learning rate scheduler.
785
- max_grad_norm (float, optional): Max gradient norm. Defaults to 1.0, set to 0 to disable.
786
- weight_decay (float, optional): Weight decay. Defaults to 0.0.
787
- lora (bool, optional): Whether to use LoRA adapters. Defaults to True.
788
- lora_r (int, optional): Rank of LoRA adapters. Defaults to 8.
789
- lora_dropout (float, optional): Dropout rate for LoRA adapters. Defaults to 0.
790
- lora_alpha (float, optional): Alpha for LoRA adapters. Defaults to 8.
791
- lora_trainable_modules (str, optional): Trainable modules for LoRA adapters. Defaults to "all-linear".
792
- suffix (str, optional): Up to 40 character suffix that will be added to your fine-tuned model name.
793
- Defaults to None.
794
- wandb_api_key (str, optional): API key for Weights & Biases integration.
795
- Defaults to None.
796
- wandb_base_url (str, optional): Base URL for Weights & Biases integration.
797
- Defaults to None.
798
- wandb_project_name (str, optional): Project name for Weights & Biases integration.
799
- Defaults to None.
800
- wandb_name (str, optional): Run name for Weights & Biases integration.
801
- Defaults to None.
802
- verbose (bool, optional): whether to print the job parameters before submitting a request.
803
- Defaults to False.
804
- model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
805
- Defaults to None.
806
- train_on_inputs (bool or "auto"): Whether to mask the user messages in conversational data or prompts in instruction data.
807
- "auto" will automatically determine whether to mask the inputs based on the data format.
808
- For datasets with the "text" field (general format), inputs will not be masked.
809
- For datasets with the "messages" field (conversational format) or "prompt" and "completion" fields
810
- (Instruction format), inputs will be masked.
811
- Defaults to None, or "auto" if training_method is "sft" (set in create_finetune_request).
812
- training_method (str, optional): Training method. Defaults to "sft".
813
- Supported methods: "sft", "dpo".
814
- dpo_beta (float, optional): DPO beta parameter. Defaults to None.
815
- dpo_normalize_logratios_by_length (bool): Whether or not normalize logratios by sample length. Defaults to False,
816
- rpo_alpha (float, optional): RPO alpha parameter of DPO training to include NLL in the loss. Defaults to None.
817
- simpo_gamma: (float, optional): SimPO gamma parameter. Defaults to None.
818
- from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
819
- The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
820
- The step value is optional, without it the final checkpoint will be used.
821
-
822
- Returns:
823
- FinetuneResponse: Object containing information about fine-tuning job.
824
- """
825
-
826
- requestor = api_requestor.APIRequestor(
827
- client=self._client,
828
- )
829
-
830
- if model_limits is None:
831
- # mypy doesn't understand that model or from_checkpoint is not None
832
- if model is not None:
833
- model_name = model
834
- elif from_checkpoint is not None:
835
- model_name = from_checkpoint.split(":")[0]
836
- else:
837
- # this branch is unreachable, but mypy doesn't know that
838
- pass
839
- model_limits = await self.get_model_limits(model=model_name)
840
-
841
- finetune_request = create_finetune_request(
842
- model_limits=model_limits,
843
- training_file=training_file,
844
- model=model,
845
- n_epochs=n_epochs,
846
- validation_file=validation_file,
847
- n_evals=n_evals,
848
- n_checkpoints=n_checkpoints,
849
- batch_size=batch_size,
850
- learning_rate=learning_rate,
851
- lr_scheduler_type=lr_scheduler_type,
852
- min_lr_ratio=min_lr_ratio,
853
- scheduler_num_cycles=scheduler_num_cycles,
854
- warmup_ratio=warmup_ratio,
855
- max_grad_norm=max_grad_norm,
856
- weight_decay=weight_decay,
857
- lora=lora,
858
- lora_r=lora_r,
859
- lora_dropout=lora_dropout,
860
- lora_alpha=lora_alpha,
861
- lora_trainable_modules=lora_trainable_modules,
862
- suffix=suffix,
863
- wandb_api_key=wandb_api_key,
864
- wandb_base_url=wandb_base_url,
865
- wandb_project_name=wandb_project_name,
866
- wandb_name=wandb_name,
867
- train_on_inputs=train_on_inputs,
868
- training_method=training_method,
869
- dpo_beta=dpo_beta,
870
- dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
871
- rpo_alpha=rpo_alpha,
872
- simpo_gamma=simpo_gamma,
873
- from_checkpoint=from_checkpoint,
874
- )
875
-
876
- if verbose:
877
- rprint(
878
- "Submitting a fine-tuning job with the following parameters:",
879
- finetune_request,
880
- )
881
- parameter_payload = finetune_request.model_dump(exclude_none=True)
882
-
883
- response, _, _ = await requestor.arequest(
884
- options=TogetherRequest(
885
- method="POST",
886
- url="fine-tunes",
887
- params=parameter_payload,
888
- ),
889
- stream=False,
890
- )
891
-
892
- assert isinstance(response, TogetherResponse)
893
-
894
- return FinetuneResponse(**response.data)
895
-
896
- async def list(self) -> FinetuneList:
897
- """
898
- Async method to list fine-tune job history
899
-
900
- Returns:
901
- FinetuneList: Object containing a list of fine-tune jobs
902
- """
903
-
904
- requestor = api_requestor.APIRequestor(
905
- client=self._client,
906
- )
907
-
908
- response, _, _ = await requestor.arequest(
909
- options=TogetherRequest(
910
- method="GET",
911
- url="fine-tunes",
912
- ),
913
- stream=False,
914
- )
915
-
916
- assert isinstance(response, TogetherResponse)
917
-
918
- return FinetuneList(**response.data)
919
-
920
- async def retrieve(self, id: str) -> FinetuneResponse:
921
- """
922
- Async method to retrieve fine-tune job details
923
-
924
- Args:
925
- id (str): Fine-tune ID to retrieve. A string that starts with `ft-`.
926
-
927
- Returns:
928
- FinetuneResponse: Object containing information about fine-tuning job.
929
- """
930
-
931
- requestor = api_requestor.APIRequestor(
932
- client=self._client,
933
- )
934
-
935
- response, _, _ = await requestor.arequest(
936
- options=TogetherRequest(
937
- method="GET",
938
- url=f"fine-tunes/{id}",
939
- ),
940
- stream=False,
941
- )
942
-
943
- assert isinstance(response, TogetherResponse)
944
-
945
- return FinetuneResponse(**response.data)
946
-
947
- async def cancel(self, id: str) -> FinetuneResponse:
948
- """
949
- Async method to cancel a running fine-tuning job
950
-
951
- Args:
952
- id (str): Fine-tune ID to cancel. A string that starts with `ft-`.
953
-
954
- Returns:
955
- FinetuneResponse: Object containing information about cancelled fine-tuning job.
956
- """
957
-
958
- requestor = api_requestor.APIRequestor(
959
- client=self._client,
960
- )
961
-
962
- response, _, _ = await requestor.arequest(
963
- options=TogetherRequest(
964
- method="POST",
965
- url=f"fine-tunes/{id}/cancel",
966
- ),
967
- stream=False,
968
- )
969
-
970
- assert isinstance(response, TogetherResponse)
971
-
972
- return FinetuneResponse(**response.data)
973
-
974
- async def list_events(self, id: str) -> FinetuneListEvents:
975
- """
976
- List fine-tuning events
977
-
978
- Args:
979
- id (str): Unique identifier of the fine-tune job to list events for
980
-
981
- Returns:
982
- FinetuneListEvents: Object containing list of fine-tune job events
983
- """
984
-
985
- requestor = api_requestor.APIRequestor(
986
- client=self._client,
987
- )
988
-
989
- events_response, _, _ = await requestor.arequest(
990
- options=TogetherRequest(
991
- method="GET",
992
- url=f"fine-tunes/{normalize_key(id)}/events",
993
- ),
994
- stream=False,
995
- )
996
- assert isinstance(events_response, TogetherResponse)
997
-
998
- return FinetuneListEvents(**events_response.data)
999
-
1000
- async def list_checkpoints(self, id: str) -> List[FinetuneCheckpoint]:
1001
- """
1002
- List available checkpoints for a fine-tuning job
1003
-
1004
- Args:
1005
- id (str): Unique identifier of the fine-tune job to list checkpoints for
1006
-
1007
- Returns:
1008
- List[FinetuneCheckpoint]: List of available checkpoints
1009
- """
1010
- requestor = api_requestor.APIRequestor(
1011
- client=self._client,
1012
- )
1013
-
1014
- response, _, _ = await requestor.arequest(
1015
- options=TogetherRequest(
1016
- method="GET",
1017
- url=f"fine-tunes/{id}/checkpoints",
1018
- ),
1019
- stream=False,
1020
- )
1021
- assert isinstance(response, TogetherResponse)
1022
-
1023
- raw_checkpoints = response.data["data"]
1024
- return _parse_raw_checkpoints(raw_checkpoints, id)
1025
-
1026
- async def download(
1027
- self, id: str, *, output: str | None = None, checkpoint_step: int = -1
1028
- ) -> str:
1029
- """
1030
- TODO: Implement async download method
1031
- """
1032
-
1033
- raise NotImplementedError(
1034
- "AsyncFineTuning.download not implemented. "
1035
- "Please use FineTuning.download function instead."
1036
- )
1037
-
1038
- async def get_model_limits(self, *, model: str) -> FinetuneTrainingLimits:
1039
- """
1040
- Requests training limits for a specific model
1041
-
1042
- Args:
1043
- model_name (str): Name of the model to get limits for
1044
-
1045
- Returns:
1046
- FinetuneTrainingLimits: Object containing training limits for the model
1047
- """
1048
-
1049
- requestor = api_requestor.APIRequestor(
1050
- client=self._client,
1051
- )
1052
-
1053
- model_limits_response, _, _ = await requestor.arequest(
1054
- options=TogetherRequest(
1055
- method="GET",
1056
- url="fine-tunes/models/limits",
1057
- params={"model": model},
1058
- ),
1059
- stream=False,
1060
- )
1061
-
1062
- model_limits = FinetuneTrainingLimits(**model_limits_response.data)
1063
-
1064
- return model_limits