together 1.2.11__py3-none-any.whl → 2.0.0a8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (201)
  1. together/__init__.py +101 -63
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/lib/cli/api/endpoints.py +467 -0
  30. together/lib/cli/api/evals.py +588 -0
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/lib/cli/api/fine_tuning.py +566 -0
  33. together/lib/cli/api/models.py +140 -0
  34. together/lib/cli/api/utils.py +50 -0
  35. together/{cli → lib/cli}/cli.py +17 -23
  36. together/lib/constants.py +61 -0
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +455 -0
  44. together/{utils → lib/utils}/__init__.py +7 -10
  45. together/{utils → lib/utils}/_log.py +18 -13
  46. together/lib/utils/files.py +628 -0
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +17 -2
  49. together/resources/__init__.py +225 -24
  50. together/resources/audio/__init__.py +75 -0
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +605 -0
  53. together/resources/audio/transcriptions.py +282 -0
  54. together/resources/audio/translations.py +256 -0
  55. together/resources/audio/voices.py +135 -0
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -257
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +890 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +711 -0
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +397 -120
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +256 -108
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +251 -44
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +374 -0
  75. together/types/__init__.py +66 -73
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_job.py +57 -0
  157. together-2.0.0a8.dist-info/METADATA +680 -0
  158. together-2.0.0a8.dist-info/RECORD +164 -0
  159. {together-1.2.11.dist-info → together-2.0.0a8.dist-info}/WHEEL +1 -1
  160. together-2.0.0a8.dist-info/entry_points.txt +2 -0
  161. {together-1.2.11.dist-info → together-2.0.0a8.dist-info/licenses}/LICENSE +1 -1
  162. together/abstract/api_requestor.py +0 -723
  163. together/cli/api/chat.py +0 -276
  164. together/cli/api/completions.py +0 -119
  165. together/cli/api/finetune.py +0 -272
  166. together/cli/api/images.py +0 -82
  167. together/cli/api/models.py +0 -42
  168. together/client.py +0 -157
  169. together/constants.py +0 -31
  170. together/error.py +0 -191
  171. together/filemanager.py +0 -388
  172. together/legacy/__init__.py +0 -0
  173. together/legacy/base.py +0 -27
  174. together/legacy/complete.py +0 -93
  175. together/legacy/embeddings.py +0 -27
  176. together/legacy/files.py +0 -146
  177. together/legacy/finetune.py +0 -177
  178. together/legacy/images.py +0 -27
  179. together/legacy/models.py +0 -44
  180. together/resources/finetune.py +0 -489
  181. together/together_response.py +0 -50
  182. together/types/abstract.py +0 -26
  183. together/types/chat_completions.py +0 -171
  184. together/types/common.py +0 -65
  185. together/types/completions.py +0 -104
  186. together/types/embeddings.py +0 -35
  187. together/types/error.py +0 -16
  188. together/types/files.py +0 -89
  189. together/types/finetune.py +0 -265
  190. together/types/images.py +0 -42
  191. together/types/models.py +0 -44
  192. together/types/rerank.py +0 -43
  193. together/utils/api_helpers.py +0 -84
  194. together/utils/files.py +0 -204
  195. together/version.py +0 -6
  196. together-1.2.11.dist-info/METADATA +0 -408
  197. together-1.2.11.dist-info/RECORD +0 -58
  198. together-1.2.11.dist-info/entry_points.txt +0 -3
  199. /together/{abstract → lib/cli}/__init__.py +0 -0
  200. /together/{cli → lib/cli/api}/__init__.py +0 -0
  201. /together/{cli/api/__init__.py → py.typed} +0 -0
together/lib/cli/api/fine_tuning.py (new file)
@@ -0,0 +1,566 @@
+ from __future__ import annotations
+
+ import re
+ import json
+ from typing import Any, Dict, List, Union, Literal
+ from pathlib import Path
+ from datetime import datetime, timezone
+ from textwrap import wrap
+
+ import click
+ from rich import print as rprint
+ from tabulate import tabulate
+ from click.core import ParameterSource  # type: ignore[attr-defined]
+
+ from together import Together
+ from together._types import NOT_GIVEN, NotGiven
+ from together.lib.utils import log_warn
+ from together.lib.utils.tools import format_timestamp, finetune_price_to_dollars
+ from together.lib.cli.api.utils import INT_WITH_MAX, BOOL_WITH_AUTO
+ from together.lib.resources.files import DownloadManager
+ from together.lib.utils.serializer import datetime_serializer
+ from together.types.finetune_response import TrainingTypeFullTrainingType, TrainingTypeLoRaTrainingType
+ from together.lib.resources.fine_tuning import get_model_limits
+
+ _CONFIRMATION_MESSAGE = (
+     "You are about to create a fine-tuning job. "
+     "The cost of your job will be determined by the model size, the number of tokens "
+     "in the training file, the number of tokens in the validation file, the number of epochs, and "
+     "the number of evaluations. Visit https://www.together.ai/pricing to get a price estimate.\n"
+     "You can pass `-y` or `--confirm` to your command to skip this message.\n\n"
+     "Do you want to proceed?"
+ )
+
+ _FT_JOB_WITH_STEP_REGEX = r"^ft-[\dabcdef-]+:\d+$"
+
+
+ @click.group(name="fine-tuning")
+ @click.pass_context
+ def fine_tuning(ctx: click.Context) -> None:
+     """Fine-tunes API commands"""
+     pass
+
+
+ @fine_tuning.command()
+ @click.pass_context
+ @click.option(
+     "--training-file",
+     "-t",
+     type=str,
+     required=True,
+     help="Training file ID from Files API",
+ )
+ @click.option("--model", "-m", type=str, help="Base model name")
+ @click.option("--n-epochs", "-ne", type=int, default=1, help="Number of epochs to train for")
+ @click.option("--validation-file", type=str, default="", help="Validation file ID from Files API")
+ @click.option("--n-evals", type=int, default=0, help="Number of evaluation loops")
+ @click.option("--n-checkpoints", "-c", type=int, default=1, help="Number of checkpoints to save")
+ @click.option("--batch-size", "-b", type=INT_WITH_MAX, default="max", help="Train batch size")
+ @click.option("--learning-rate", "-lr", type=float, default=1e-5, help="Learning rate")
+ @click.option(
+     "--lr-scheduler-type",
+     type=click.Choice(["linear", "cosine"]),
+     default="cosine",
+     help="Learning rate scheduler type",
+ )
+ @click.option(
+     "--min-lr-ratio",
+     type=float,
+     default=0.0,
+     help="The ratio of the final learning rate to the peak learning rate",
+ )
+ @click.option(
+     "--scheduler-num-cycles",
+     type=float,
+     default=0.5,
+     help="Number or fraction of cycles for the cosine learning rate scheduler.",
+ )
+ @click.option(
+     "--warmup-ratio",
+     type=float,
+     default=0.0,
+     help="Warmup ratio for the learning rate scheduler.",
+ )
+ @click.option(
+     "--max-grad-norm",
+     type=float,
+     default=1.0,
+     help="Max gradient norm to be used for gradient clipping. Set to 0 to disable.",
+ )
+ @click.option(
+     "--weight-decay",
+     type=float,
+     default=0.0,
+     help="Weight decay",
+ )
+ @click.option(
+     "--lora/--no-lora",
+     type=bool,
+     default=True,
+     help="Whether to use LoRA adapters for fine-tuning",
+ )
+ @click.option("--lora-r", type=int, default=8, help="LoRA adapters' rank")
+ @click.option("--lora-dropout", type=float, default=0, help="LoRA adapters' dropout")
+ @click.option("--lora-alpha", type=float, default=8, help="LoRA adapters' alpha")
+ @click.option(
+     "--lora-trainable-modules",
+     type=str,
+     default="all-linear",
+     help="Trainable modules for LoRA adapters. For example, 'all-linear', 'q_proj,v_proj'",
+ )
+ @click.option(
+     "--training-method",
+     type=click.Choice(["sft", "dpo"]),
+     default="sft",
+     help="Training method to use. Options: sft (supervised fine-tuning), dpo (Direct Preference Optimization)",
+ )
+ @click.option(
+     "--dpo-beta",
+     type=float,
+     default=None,
+     help="Beta parameter for DPO training (only used when '--training-method' is 'dpo')",
+ )
+ @click.option(
+     "--dpo-normalize-logratios-by-length",
+     type=bool,
+     default=False,
+     help=("Whether to normalize logratios by sample length (only used when '--training-method' is 'dpo')"),
+ )
+ @click.option(
+     "--rpo-alpha",
+     type=float,
+     default=None,
+     help=(
+         "RPO alpha parameter of DPO training to include NLL in the loss (only used when '--training-method' is 'dpo')"
+     ),
+ )
+ @click.option(
+     "--simpo-gamma",
+     type=float,
+     default=None,
+     help="SimPO gamma parameter (only used when '--training-method' is 'dpo')",
+ )
+ @click.option(
+     "--suffix",
+     "-s",
+     type=str,
+     default=None,
+     help="Suffix for the fine-tuned model name",
+ )
+ @click.option("--wandb-api-key", type=str, default=None, help="Wandb API key")
+ @click.option("--wandb-base-url", type=str, default=None, help="Wandb base URL")
+ @click.option("--wandb-project-name", type=str, default=None, help="Wandb project name")
+ @click.option("--wandb-name", type=str, default=None, help="Wandb run name")
+ @click.option(
+     "--confirm",
+     "-y",
+     type=bool,
+     is_flag=True,
+     default=False,
+     help="Whether to skip the launch confirmation message",
+ )
+ @click.option(
+     "--train-on-inputs",
+     type=BOOL_WITH_AUTO,
+     default=None,
+     help="Whether to mask the user messages in conversational data or prompts in instruction data. "
+     "`auto` will automatically determine whether to mask the inputs based on the data format.",
+ )
+ @click.option(
+     "--from-checkpoint",
+     type=str,
+     default=None,
+     help="The checkpoint identifier to continue training from a previous fine-tuning job. "
+     "The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}. "
+     "The step value is optional, without it the final checkpoint will be used.",
+ )
+ @click.option(
+     "--from-hf-model",
+     type=str,
+     help="The Hugging Face Hub repo to start training from. "
+     "Should be as close as possible to the base model (specified by the `model` argument) "
+     "in terms of architecture and size",
+ )
+ @click.option(
+     "--hf-model-revision",
+     type=str,
+     help="The revision of the Hugging Face Hub model to continue training from. "
+     "Example: hf_model_revision=None (defaults to the latest revision in `main`) "
+     "or hf_model_revision='607a30d783dfa663caf39e06633721c8d4cfcd7e' (specific commit).",
+ )
+ @click.option(
+     "--hf-api-token",
+     type=str,
+     default=None,
+     help="HF API token to use for uploading a checkpoint to a private repo",
+ )
+ @click.option(
+     "--hf-output-repo-name",
+     type=str,
+     default=None,
+     help="HF repo to upload the fine-tuned model to",
+ )
+ def create(
+     ctx: click.Context,
+     training_file: str,
+     validation_file: str,
+     model: str | None,
+     n_epochs: int,
+     n_evals: int,
+     n_checkpoints: int,
+     batch_size: int | Literal["max"],
+     learning_rate: float,
+     lr_scheduler_type: Literal["linear", "cosine"],
+     min_lr_ratio: float,
+     scheduler_num_cycles: float,
+     warmup_ratio: float,
+     max_grad_norm: float,
+     weight_decay: float,
+     lora: bool | None,
+     lora_r: int | None,
+     lora_dropout: float | None,
+     lora_alpha: float | None,
+     lora_trainable_modules: str | None,
+     suffix: str | None,
+     wandb_api_key: str | None,
+     wandb_base_url: str | None,
+     wandb_project_name: str | None,
+     wandb_name: str | None,
+     confirm: bool | None,
+     train_on_inputs: bool | Literal["auto"] | None,
+     training_method: str | None,
+     dpo_beta: float | None,
+     dpo_normalize_logratios_by_length: bool | None,
+     rpo_alpha: float | None,
+     simpo_gamma: float | None,
+     from_checkpoint: str | None,
+     from_hf_model: str | None,
+     hf_model_revision: str | None,
+     hf_api_token: str | None,
+     hf_output_repo_name: str | None,
+ ) -> None:
+     """Start fine-tuning"""
+     client: Together = ctx.obj
+
+     training_args: dict[str, Any] = dict(
+         training_file=training_file,
+         model=model,
+         n_epochs=n_epochs,
+         validation_file=validation_file,
+         n_evals=n_evals,
+         n_checkpoints=n_checkpoints,
+         batch_size=batch_size,
+         learning_rate=learning_rate,
+         lr_scheduler_type=lr_scheduler_type,
+         min_lr_ratio=min_lr_ratio,
+         scheduler_num_cycles=scheduler_num_cycles,
+         warmup_ratio=warmup_ratio,
+         max_grad_norm=max_grad_norm,
+         weight_decay=weight_decay,
+         lora=lora,
+         lora_r=lora_r,
+         lora_dropout=lora_dropout,
+         lora_alpha=lora_alpha,
+         lora_trainable_modules=lora_trainable_modules,
+         suffix=suffix,
+         wandb_api_key=wandb_api_key,
+         wandb_base_url=wandb_base_url,
+         wandb_project_name=wandb_project_name,
+         wandb_name=wandb_name,
+         train_on_inputs=train_on_inputs,
+         training_method=training_method,
+         dpo_beta=dpo_beta,
+         dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
+         rpo_alpha=rpo_alpha,
+         simpo_gamma=simpo_gamma,
+         from_checkpoint=from_checkpoint,
+         from_hf_model=from_hf_model,
+         hf_model_revision=hf_model_revision,
+         hf_api_token=hf_api_token,
+         hf_output_repo_name=hf_output_repo_name,
+     )
+
+     if model is None and from_checkpoint is None:
+         raise click.BadParameter("You must specify either a model or a checkpoint")
+
+     model_name = model
+     if from_checkpoint is not None:
+         model_name = from_checkpoint.split(":")[0]
+
+     model_limits = get_model_limits(client, str(model_name))
+
+     if lora:
+         if model_limits.lora_training is None:
+             raise click.BadParameter(f"LoRA fine-tuning is not supported for the model `{model}`")
+         default_values = {
+             "lora_r": model_limits.lora_training.max_rank,
+             "learning_rate": 1e-3,
+         }
+
+         for arg in default_values:
+             arg_source = ctx.get_parameter_source(arg)  # type: ignore[attr-defined]
+             if arg_source == ParameterSource.DEFAULT:
+                 training_args[arg] = default_values[arg]
+
+         if ctx.get_parameter_source("lora_alpha") == ParameterSource.DEFAULT:  # type: ignore[attr-defined]
+             training_args["lora_alpha"] = training_args["lora_r"] * 2
+     else:
+         if model_limits.full_training is None:
+             raise click.BadParameter(f"Full fine-tuning is not supported for the model `{model}`")
+
+         for param in ["lora_r", "lora_dropout", "lora_alpha", "lora_trainable_modules"]:
+             param_source = ctx.get_parameter_source(param)  # type: ignore[attr-defined]
+             if param_source != ParameterSource.DEFAULT:
+                 raise click.BadParameter(
+                     f"You set LoRA parameter `{param}` for a full fine-tuning job. "
+                     f"Please change the job type with --lora or remove `{param}` from the arguments"
+                 )
+
+     if n_evals <= 0 and validation_file:
+         log_warn(
+             "Warning: You have specified a validation file but the number of evaluation loops is set to 0. No evaluations will be performed."
+         )
+     elif n_evals > 0 and not validation_file:
+         raise click.BadParameter("You have specified a number of evaluation loops but no validation file.")
+
+     if confirm or click.confirm(_CONFIRMATION_MESSAGE, default=True, show_default=True):
+         response = client.fine_tuning.create(
+             **training_args,
+             verbose=True,
+         )
+
+         report_string = f"Successfully submitted a fine-tuning job {response.id}"
+         # created_at reports UTC time, we use .astimezone() to convert to local time
+         formatted_time = response.created_at.astimezone().strftime("%m/%d/%Y, %H:%M:%S")
+         report_string += f" at {formatted_time}"
+         rprint(report_string)
+     else:
+         click.echo("No confirmation received, stopping job launch")
+
+
+ @fine_tuning.command()
+ @click.pass_context
+ def list(ctx: click.Context) -> None:
+     """List fine-tuning jobs"""
+     client: Together = ctx.obj
+
+     response = client.fine_tuning.list()
+
+     response.data = response.data or []
+
+     # Use a default datetime for None values to make sure the key function always returns a comparable value
+     epoch_start = datetime.fromtimestamp(0, tz=timezone.utc)
+     response.data.sort(key=lambda x: x.created_at or epoch_start)
+
+     display_list: List[Dict[str, Any]] = []
+     for i in response.data:
+         display_list.append(
+             {
+                 "Fine-tune ID": i.id,
+                 "Model Output Name": "\n".join(wrap(i.x_model_output_name or "", width=30)),
+                 "Status": i.status,
+                 "Created At": i.created_at,
+                 "Price": f"""${
+                     finetune_price_to_dollars(float(str(i.total_price)))
+                 }""",  # convert to string for mypy typing
+             }
+         )
+     table = tabulate(display_list, headers="keys", tablefmt="grid", showindex=True)
+
+     click.echo(table)
+
+
+ @fine_tuning.command()
+ @click.pass_context
+ @click.argument("fine_tune_id", type=str, required=True)
+ def retrieve(ctx: click.Context, fine_tune_id: str) -> None:
+     """Retrieve fine-tuning job details"""
+     client: Together = ctx.obj
+
+     response = client.fine_tuning.retrieve(fine_tune_id)
+
+     # remove events from response for cleaner output
+     response.events = None
+
+     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4))
+
+
+ @fine_tuning.command()
+ @click.pass_context
+ @click.argument("fine_tune_id", type=str, required=True)
+ @click.option("--quiet", is_flag=True, help="Do not prompt for confirmation before cancelling job")
+ def cancel(ctx: click.Context, fine_tune_id: str, quiet: bool = False) -> None:
+     """Cancel fine-tuning job"""
+     client: Together = ctx.obj
+     if not quiet:
+         confirm_response = input(
+             "You will be billed for any completed training steps upon cancellation. "
+             f"Do you want to cancel job {fine_tune_id}? [y/N]"
+         )
+         if "y" not in confirm_response.lower():
+             click.echo({"status": "Cancel not submitted"})
+             return
+     response = client.fine_tuning.cancel(fine_tune_id)
+
+     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4, default=datetime_serializer))
+
+
+ @fine_tuning.command()
+ @click.pass_context
+ @click.argument("fine_tune_id", type=str, required=True)
+ def list_events(ctx: click.Context, fine_tune_id: str) -> None:
+     """List fine-tuning events"""
+     client: Together = ctx.obj
+
+     response = client.fine_tuning.list_events(fine_tune_id)
+
+     response.data = response.data or []
+
+     display_list: List[Dict[str, Any]] = []
+     for i in response.data:
+         display_list.append(
+             {
+                 "Message": "\n".join(wrap(i.message or "", width=50)),
+                 "Type": i.type,
+                 "Created At": i.created_at,
+                 "Hash": i.hash,
+             }
+         )
+     table = tabulate(display_list, headers="keys", tablefmt="grid", showindex=True)
+
+     click.echo(table)
+
+
+ @fine_tuning.command()
+ @click.pass_context
+ @click.argument("fine_tune_id", type=str, required=True)
+ def list_checkpoints(ctx: click.Context, fine_tune_id: str) -> None:
+     """List available checkpoints for a fine-tuning job"""
+     client: Together = ctx.obj
+
+     checkpoints = client.fine_tuning.list_checkpoints(fine_tune_id)
+
+     display_list: List[Dict[str, Any]] = []
+     for checkpoint in checkpoints.data:
+         name = (
+             f"{fine_tune_id}:{checkpoint.step}"
+             if "intermediate" in checkpoint.checkpoint_type.lower()
+             else fine_tune_id
+         )
+         display_list.append(
+             {
+                 "Type": checkpoint.checkpoint_type,
+                 "Timestamp": format_timestamp(checkpoint.created_at),
+                 "Name": name,
+             }
+         )
+
+     if display_list:
+         click.echo(f"Job {fine_tune_id} contains the following checkpoints:")
+         table = tabulate(display_list, headers="keys", tablefmt="grid")
+         click.echo(table)
+         click.echo("\nTo download a checkpoint, use `together fine-tuning download`")
+     else:
+         click.echo(f"No checkpoints found for job {fine_tune_id}")
+
+
+ @fine_tuning.command(name="download")
+ @click.pass_context
+ @click.argument("fine_tune_id", type=str, required=True)
+ @click.option(
+     "--output_dir",
+     "-o",
+     type=click.Path(exists=True, file_okay=False, resolve_path=True),
+     required=False,
+     default=None,
+     help="Output directory",
+ )
+ @click.option(
+     "--checkpoint-step",
+     "-s",
+     type=int,
+     required=False,
+     default=None,
+     help="Download fine-tuning checkpoint. Defaults to latest.",
+ )
+ @click.option(
+     "--checkpoint-type",
+     type=click.Choice(["merged", "adapter", "default"]),
+     required=False,
+     default="merged",
+     help="Specifies checkpoint type. 'merged' and 'adapter' options work only for LoRA jobs.",
+ )
+ def download(
+     ctx: click.Context,
+     fine_tune_id: str,
+     output_dir: str | None = None,
+     checkpoint_step: Union[int, NotGiven] = NOT_GIVEN,
+     checkpoint_type: Literal["default", "merged", "adapter"] | NotGiven = NOT_GIVEN,
+ ) -> None:
+     """Download fine-tuning checkpoint"""
+     client: Together = ctx.obj
+
+     if re.match(_FT_JOB_WITH_STEP_REGEX, fine_tune_id) is not None:
+         if checkpoint_step is NOT_GIVEN:
+             checkpoint_step = int(fine_tune_id.split(":")[1])
+             fine_tune_id = fine_tune_id.split(":")[0]
+         else:
+             raise ValueError(
+                 f"Fine-tuning job ID {fine_tune_id} contains a colon to specify the step to download, but `checkpoint_step` "
+                 "was also set. Remove one of the step specifiers to proceed."
+             )
+
+     ft_job = client.fine_tuning.retrieve(fine_tune_id)
+
+     loosely_typed_checkpoint_type: str | NotGiven = checkpoint_type
+     if isinstance(ft_job.training_type, TrainingTypeFullTrainingType):
+         if checkpoint_type != "default":
+             raise ValueError("Only DEFAULT checkpoint type is allowed for FullTrainingType")
+         loosely_typed_checkpoint_type = "model_output_path"
+     elif isinstance(ft_job.training_type, TrainingTypeLoRaTrainingType):
+         if checkpoint_type == "default":
+             loosely_typed_checkpoint_type = "merged"
+
+         if loosely_typed_checkpoint_type not in {
+             "merged",
+             "adapter",
+         }:
+             raise ValueError(f"Invalid checkpoint type for LoRATrainingType: {checkpoint_type}")
+
+     remote_name = ft_job.x_model_output_name
+
+     url = f"/finetune/download?ft_id={fine_tune_id}&checkpoint={loosely_typed_checkpoint_type}"
+     output: Path | None = None
+     if isinstance(output_dir, str):
+         output = Path(output_dir)
+
+     file_path, file_size = DownloadManager(client).download(
+         url=url,
+         output=output,
+         remote_name=remote_name,
+         fetch_metadata=True,
+     )
+
+     click.echo(json.dumps({"object": "local", "id": fine_tune_id, "filename": file_path, "size": file_size}, indent=4))
+
+
+ @fine_tuning.command()
+ @click.pass_context
+ @click.argument("fine_tune_id", type=str, required=True)
+ @click.option("--force", is_flag=True, help="Force deletion without confirmation")
+ @click.option("--quiet", is_flag=True, help="Do not prompt for confirmation before deleting job")
+ def delete(ctx: click.Context, fine_tune_id: str, force: bool = False, quiet: bool = False) -> None:
+     """Delete fine-tuning job"""
+     client: Together = ctx.obj
+
+     if not quiet:
+         confirm_response = input(
+             f"Are you sure you want to delete fine-tuning job {fine_tune_id}? This action cannot be undone. [y/N] "
+         )
+         if confirm_response.lower() != "y":
+             click.echo("Deletion cancelled")
+             return
+
+     response = client.fine_tuning.delete(fine_tune_id, force=force)
+
+     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4))
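
For reference, a minimal Python sketch of what `together fine-tuning create` drives through the client shown above; the file ID and model name are placeholders, and it assumes TOGETHER_API_KEY is set in the environment:

    from together import Together

    client = Together()  # assumes TOGETHER_API_KEY is set in the environment

    # Placeholder file ID and model name; lora=True and verbose=True mirror
    # the defaults the CLI forwards via `training_args` above.
    job = client.fine_tuning.create(
        training_file="file-xxxxxxxx",
        model="example-org/example-base-model",
        n_epochs=1,
        lora=True,
        verbose=True,
    )
    print(job.id, job.created_at)
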
together/lib/cli/api/models.py (new file)
@@ -0,0 +1,140 @@
+ import json as json_lib
+ from typing import Any, Dict, List, Literal, Optional
+
+ import click
+ from tabulate import tabulate
+
+ from together import Together, omit
+ from together._models import BaseModel
+ from together._response import APIResponse as APIResponse
+ from together.lib.resources.models import filter_by_dedicated_models
+ from together.types.model_upload_response import ModelUploadResponse
+
+
+ @click.group()
+ @click.pass_context
+ def models(ctx: click.Context) -> None:
+     """Models API commands"""
+     pass
+
+
+ @models.command()
+ @click.option(
+     "--type",
+     type=click.Choice(["dedicated"]),
+     help="Filter models by type (dedicated: models that can be deployed as dedicated endpoints)",
+ )
+ @click.option(
+     "--json",
+     is_flag=True,
+     help="Output in JSON format",
+ )
+ @click.pass_context
+ def list(ctx: click.Context, type: Optional[str], json: bool) -> None:
+     """List models"""
+     client: Together = ctx.obj
+
+     response = client.models.list()
+     models_list = response
+
+     if type == "dedicated":
+         models_list = filter_by_dedicated_models(client, models_list)
+
+     display_list: List[Dict[str, Any]] = []
+     model: BaseModel
+     for model in models_list:
+         display_list.append(
+             {
+                 "ID": model.id,
+                 "Name": model.display_name,
+                 "Organization": model.organization,
+                 "Type": model.type,
+                 "Context Length": model.context_length,
+                 "License": model.license,
+                 "Input per 1M token": model.pricing.input if model.pricing else None,
+                 "Output per 1M token": model.pricing.output if model.pricing else None,
+             }
+         )
+
+     if json:
+         click.echo(json_lib.dumps(display_list, indent=2))
+     else:
+         click.echo(tabulate(display_list, headers="keys", tablefmt="plain"))
+
+
+ @models.command()
+ @click.option(
+     "--model-name",
+     required=True,
+     help="The name to give to your uploaded model",
+ )
+ @click.option(
+     "--model-source",
+     required=True,
+     help="The source location of the model (Hugging Face repo or S3 path)",
+ )
+ @click.option(
+     "--model-type",
+     type=click.Choice(["model", "adapter"]),
+     default="model",
+     help="Whether the model is a full model or an adapter",
+ )
+ @click.option(
+     "--hf-token",
+     help="Hugging Face token (if uploading from Hugging Face)",
+ )
+ @click.option(
+     "--description",
+     help="A description of your model",
+ )
+ @click.option(
+     "--base-model",
+     help="The base model to use for an adapter if setting it to run against a serverless pool. Only used for model_type 'adapter'.",
+ )
+ @click.option(
+     "--lora-model",
+     help="The lora pool to use for an adapter if setting it to run against, say, a dedicated pool. Only used for model_type 'adapter'.",
+ )
+ @click.option(
+     "--json",
+     is_flag=True,
+     help="Output in JSON format",
+ )
+ @click.pass_context
+ def upload(
+     ctx: click.Context,
+     model_name: str,
+     model_source: str,
+     hf_token: Optional[str],
+     description: Optional[str],
+     base_model: Optional[str],
+     lora_model: Optional[str],
+     json: bool,
+     model_type: Optional[Literal["model", "adapter"]] = "model",
+ ) -> None:
+     """Upload a custom model or adapter from Hugging Face or S3"""
+     client: Together = ctx.obj
+
+     response: ModelUploadResponse = client.models.upload(
+         model_name=model_name,
+         model_source=model_source,
+         model_type=model_type or omit,
+         hf_token=hf_token or omit,
+         description=description or omit,
+         base_model=base_model or omit,
+         lora_model=lora_model or omit,
+     )
+
+     if json:
+         click.echo(json_lib.dumps(response.model_dump(), indent=2))
+     else:
+         click.echo("Model upload job created successfully!")
+         if response.data.job_id:
+             click.echo(f"Job ID: {response.data.job_id}")
+         if response.data.x_model_name:
+             click.echo(f"Model Name: {response.data.x_model_name}")
+         if response.data.x_model_id:
+             click.echo(f"Model ID: {response.data.x_model_id}")
+         if response.data.x_model_source:
+             click.echo(f"Model Source: {response.data.x_model_source}")
+         click.echo(f"Message: {response.message}")