together 1.5.17__py3-none-any.whl → 2.0.0a8__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
Files changed (205)
  1. together/__init__.py +101 -63
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +108 -75
  30. together/lib/cli/api/evals.py +588 -0
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +161 -120
  33. together/lib/cli/api/models.py +140 -0
  34. together/{cli → lib/cli}/api/utils.py +6 -7
  35. together/{cli → lib/cli}/cli.py +16 -24
  36. together/{constants.py → lib/constants.py} +17 -12
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +455 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/lib/utils/files.py +628 -0
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -33
  50. together/resources/audio/__init__.py +72 -21
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -122
  53. together/resources/audio/transcriptions.py +282 -0
  54. together/resources/audio/translations.py +256 -0
  55. together/resources/audio/voices.py +135 -0
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +598 -395
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +398 -121
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +256 -108
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +238 -90
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +374 -0
  75. together/types/__init__.py +65 -109
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_job.py +57 -0
  157. together-2.0.0a8.dist-info/METADATA +680 -0
  158. together-2.0.0a8.dist-info/RECORD +164 -0
  159. {together-1.5.17.dist-info → together-2.0.0a8.dist-info}/WHEEL +1 -1
  160. together-2.0.0a8.dist-info/entry_points.txt +2 -0
  161. {together-1.5.17.dist-info → together-2.0.0a8.dist-info/licenses}/LICENSE +1 -1
  162. together/abstract/api_requestor.py +0 -729
  163. together/cli/api/chat.py +0 -276
  164. together/cli/api/completions.py +0 -119
  165. together/cli/api/images.py +0 -93
  166. together/cli/api/models.py +0 -55
  167. together/client.py +0 -176
  168. together/error.py +0 -194
  169. together/filemanager.py +0 -389
  170. together/legacy/__init__.py +0 -0
  171. together/legacy/base.py +0 -27
  172. together/legacy/complete.py +0 -93
  173. together/legacy/embeddings.py +0 -27
  174. together/legacy/files.py +0 -146
  175. together/legacy/finetune.py +0 -177
  176. together/legacy/images.py +0 -27
  177. together/legacy/models.py +0 -44
  178. together/resources/batch.py +0 -136
  179. together/resources/code_interpreter.py +0 -82
  180. together/resources/finetune.py +0 -1064
  181. together/together_response.py +0 -50
  182. together/types/abstract.py +0 -26
  183. together/types/audio_speech.py +0 -110
  184. together/types/batch.py +0 -53
  185. together/types/chat_completions.py +0 -197
  186. together/types/code_interpreter.py +0 -57
  187. together/types/common.py +0 -66
  188. together/types/completions.py +0 -107
  189. together/types/embeddings.py +0 -35
  190. together/types/endpoints.py +0 -123
  191. together/types/error.py +0 -16
  192. together/types/files.py +0 -90
  193. together/types/finetune.py +0 -398
  194. together/types/images.py +0 -44
  195. together/types/models.py +0 -45
  196. together/types/rerank.py +0 -43
  197. together/utils/api_helpers.py +0 -124
  198. together/utils/files.py +0 -425
  199. together/version.py +0 -6
  200. together-1.5.17.dist-info/METADATA +0 -525
  201. together-1.5.17.dist-info/RECORD +0 -69
  202. together-1.5.17.dist-info/entry_points.txt +0 -3
  203. /together/{abstract → lib/cli}/__init__.py +0 -0
  204. /together/{cli → lib/cli/api}/__init__.py +0 -0
  205. /together/{cli/api/__init__.py → py.typed} +0 -0
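
Most of the churn in this list is mechanical: in 2.0.0a8 the hand-written helpers and the CLI move under `together/lib/`, the generated API surface lives in `together/resources/` and `together/types/`, and the `together.legacy`, `together.abstract`, and `together.error` layers are deleted outright. As a minimal sketch of what that means for imports (old paths come from the deleted files above, new paths from the renames; any re-exports beyond these are an assumption, not something this diff shows):

# together 1.5.17 (modules deleted in this release)
from together.cli.api.finetune import fine_tuning   # CLI command group
from together.utils import format_timestamp         # shared helpers

# together 2.0.0a8 (paths taken from the renames listed above)
from together.lib.cli.api.fine_tuning import fine_tuning
from together.lib.utils.tools import format_timestamp
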
together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py}
@@ -1,31 +1,26 @@
  from __future__ import annotations
 
- import json
  import re
+ import json
+ from typing import Any, Dict, List, Union, Literal
+ from pathlib import Path
  from datetime import datetime, timezone
  from textwrap import wrap
- from typing import Any, Literal
 
  import click
- from click.core import ParameterSource  # type: ignore[attr-defined]
  from rich import print as rprint
  from tabulate import tabulate
+ from click.core import ParameterSource  # type: ignore[attr-defined]
 
  from together import Together
- from together.cli.api.utils import BOOL_WITH_AUTO, INT_WITH_MAX
- from together.types.finetune import (
-     DownloadCheckpointType,
-     FinetuneEventType,
-     FinetuneTrainingLimits,
- )
- from together.utils import (
-     finetune_price_to_dollars,
-     format_timestamp,
-     log_warn,
-     log_warn_once,
-     parse_timestamp,
- )
-
+ from together._types import NOT_GIVEN, NotGiven
+ from together.lib.utils import log_warn
+ from together.lib.utils.tools import format_timestamp, finetune_price_to_dollars
+ from together.lib.cli.api.utils import INT_WITH_MAX, BOOL_WITH_AUTO
+ from together.lib.resources.files import DownloadManager
+ from together.lib.utils.serializer import datetime_serializer
+ from together.types.finetune_response import TrainingTypeFullTrainingType, TrainingTypeLoRaTrainingType
+ from together.lib.resources.fine_tuning import get_model_limits
 
  _CONFIRMATION_MESSAGE = (
      "You are about to create a fine-tuning job. "
@@ -36,16 +31,7 @@ _CONFIRMATION_MESSAGE = (
      "Do you want to proceed?"
  )
 
-
- class DownloadCheckpointTypeChoice(click.Choice):
-     def __init__(self) -> None:
-         super().__init__([ct.value for ct in DownloadCheckpointType])
-
-     def convert(
-         self, value: str, param: click.Parameter | None, ctx: click.Context | None
-     ) -> DownloadCheckpointType:
-         value = super().convert(value, param, ctx)
-         return DownloadCheckpointType(value)
+ _FT_JOB_WITH_STEP_REGEX = r"^ft-[\dabcdef-]+:\d+$"
 
 
  @click.group(name="fine-tuning")
@@ -65,19 +51,11 @@ def fine_tuning(ctx: click.Context) -> None:
      help="Training file ID from Files API",
  )
  @click.option("--model", "-m", type=str, help="Base model name")
- @click.option(
-     "--n-epochs", "-ne", type=int, default=1, help="Number of epochs to train for"
- )
- @click.option(
-     "--validation-file", type=str, default="", help="Validation file ID from Files API"
- )
+ @click.option("--n-epochs", "-ne", type=int, default=1, help="Number of epochs to train for")
+ @click.option("--validation-file", type=str, default="", help="Validation file ID from Files API")
  @click.option("--n-evals", type=int, default=0, help="Number of evaluation loops")
- @click.option(
-     "--n-checkpoints", "-c", type=int, default=1, help="Number of checkpoints to save"
- )
- @click.option(
-     "--batch-size", "-b", type=INT_WITH_MAX, default="max", help="Train batch size"
- )
+ @click.option("--n-checkpoints", "-c", type=int, default=1, help="Number of checkpoints to save")
+ @click.option("--batch-size", "-b", type=INT_WITH_MAX, default="max", help="Train batch size")
  @click.option("--learning-rate", "-lr", type=float, default=1e-5, help="Learning rate")
  @click.option(
      "--lr-scheduler-type",
@@ -146,18 +124,14 @@ def fine_tuning(ctx: click.Context) -> None:
      "--dpo-normalize-logratios-by-length",
      type=bool,
      default=False,
-     help=(
-         "Whether to normalize logratios by sample length "
-         "(only used when '--training-method' is 'dpo')"
-     ),
+     help=("Whether to normalize logratios by sample length (only used when '--training-method' is 'dpo')"),
  )
  @click.option(
      "--rpo-alpha",
      type=float,
      default=None,
      help=(
-         "RPO alpha parameter of DPO training to include NLL in the loss "
-         "(only used when '--training-method' is 'dpo')"
+         "RPO alpha parameter of DPO training to include NLL in the loss (only used when '--training-method' is 'dpo')"
      ),
  )
  @click.option(
@@ -200,11 +174,37 @@ def fine_tuning(ctx: click.Context) -> None:
      "The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}. "
      "The step value is optional, without it the final checkpoint will be used.",
  )
+ @click.option(
+     "--from-hf-model",
+     type=str,
+     help="The Hugging Face Hub repo to start training from. "
+     "Should be as close as possible to the base model (specified by the `model` argument) "
+     "in terms of architecture and size",
+ )
+ @click.option(
+     "--hf-model-revision",
+     type=str,
+     help="The revision of the Hugging Face Hub model to continue training from. "
+     "Example: hf_model_revision=None (defaults to the latest revision in `main`) "
+     "or hf_model_revision='607a30d783dfa663caf39e06633721c8d4cfcd7e' (specific commit).",
+ )
+ @click.option(
+     "--hf-api-token",
+     type=str,
+     default=None,
+     help="HF API token to use for uploading a checkpoint to a private repo",
+ )
+ @click.option(
+     "--hf-output-repo-name",
+     type=str,
+     default=None,
+     help="HF repo to upload the fine-tuned model to",
+ )
  def create(
      ctx: click.Context,
      training_file: str,
      validation_file: str,
-     model: str,
+     model: str | None,
      n_epochs: int,
      n_evals: int,
      n_checkpoints: int,
@@ -216,24 +216,28 @@ def create(
      warmup_ratio: float,
      max_grad_norm: float,
      weight_decay: float,
-     lora: bool,
-     lora_r: int,
-     lora_dropout: float,
-     lora_alpha: float,
-     lora_trainable_modules: str,
-     suffix: str,
-     wandb_api_key: str,
-     wandb_base_url: str,
-     wandb_project_name: str,
-     wandb_name: str,
-     confirm: bool,
-     train_on_inputs: bool | Literal["auto"],
-     training_method: str,
+     lora: bool | None,
+     lora_r: int | None,
+     lora_dropout: float | None,
+     lora_alpha: float | None,
+     lora_trainable_modules: str | None,
+     suffix: str | None,
+     wandb_api_key: str | None,
+     wandb_base_url: str | None,
+     wandb_project_name: str | None,
+     wandb_name: str | None,
+     confirm: bool | None,
+     train_on_inputs: bool | Literal["auto"] | None,
+     training_method: str | None,
      dpo_beta: float | None,
-     dpo_normalize_logratios_by_length: bool,
+     dpo_normalize_logratios_by_length: bool | None,
      rpo_alpha: float | None,
      simpo_gamma: float | None,
-     from_checkpoint: str,
+     from_checkpoint: str | None,
+     from_hf_model: str | None,
+     hf_model_revision: str | None,
+     hf_api_token: str | None,
+     hf_output_repo_name: str | None,
  ) -> None:
      """Start fine-tuning"""
      client: Together = ctx.obj
@@ -270,6 +274,10 @@ def create(
          rpo_alpha=rpo_alpha,
          simpo_gamma=simpo_gamma,
          from_checkpoint=from_checkpoint,
+         from_hf_model=from_hf_model,
+         hf_model_revision=hf_model_revision,
+         hf_api_token=hf_api_token,
+         hf_output_repo_name=hf_output_repo_name,
      )
 
      if model is None and from_checkpoint is None:
@@ -279,37 +287,26 @@ def create(
      if from_checkpoint is not None:
          model_name = from_checkpoint.split(":")[0]
 
-     model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits(
-         model=model_name
-     )
+     model_limits = get_model_limits(client, str(model_name))
 
      if lora:
          if model_limits.lora_training is None:
-             raise click.BadParameter(
-                 f"LoRA fine-tuning is not supported for the model `{model}`"
-             )
-         if training_method == "dpo":
-             default_batch_size = model_limits.lora_training.max_batch_size_dpo
-         else:
-             default_batch_size = model_limits.lora_training.max_batch_size
+             raise click.BadParameter(f"LoRA fine-tuning is not supported for the model `{model}`")
          default_values = {
              "lora_r": model_limits.lora_training.max_rank,
-             "batch_size": default_batch_size,
              "learning_rate": 1e-3,
          }
 
          for arg in default_values:
              arg_source = ctx.get_parameter_source("arg")  # type: ignore[attr-defined]
              if arg_source == ParameterSource.DEFAULT:
-                 training_args[arg] = default_values[arg_source]
+                 training_args[arg] = default_values[str(arg_source)]
 
          if ctx.get_parameter_source("lora_alpha") == ParameterSource.DEFAULT:  # type: ignore[attr-defined]
              training_args["lora_alpha"] = training_args["lora_r"] * 2
      else:
          if model_limits.full_training is None:
-             raise click.BadParameter(
-                 f"Full fine-tuning is not supported for the model `{model}`"
-             )
+             raise click.BadParameter(f"Full fine-tuning is not supported for the model `{model}`")
 
          for param in ["lora_r", "lora_dropout", "lora_alpha", "lora_trainable_modules"]:
              param_source = ctx.get_parameter_source(param)  # type: ignore[attr-defined]
@@ -319,23 +316,12 @@ def create(
                  f"Please change the job type with --lora or remove `{param}` from the arguments"
              )
 
-         batch_size_source = ctx.get_parameter_source("batch_size")  # type: ignore[attr-defined]
-         if batch_size_source == ParameterSource.DEFAULT:
-             if training_method == "dpo":
-                 training_args["batch_size"] = (
-                     model_limits.full_training.max_batch_size_dpo
-                 )
-             else:
-                 training_args["batch_size"] = model_limits.full_training.max_batch_size
-
      if n_evals <= 0 and validation_file:
          log_warn(
              "Warning: You have specified a validation file but the number of evaluation loops is set to 0. No evaluations will be performed."
          )
      elif n_evals > 0 and not validation_file:
-         raise click.BadParameter(
-             "You have specified a number of evaluation loops but no validation file."
-         )
+         raise click.BadParameter("You have specified a number of evaluation loops but no validation file.")
 
      if confirm or click.confirm(_CONFIRMATION_MESSAGE, default=True, show_default=True):
          response = client.fine_tuning.create(
@@ -344,13 +330,9 @@
          )
 
          report_string = f"Successfully submitted a fine-tuning job {response.id}"
-         if response.created_at is not None:
-             created_time = datetime.strptime(
-                 response.created_at, "%Y-%m-%dT%H:%M:%S.%f%z"
-             )
-             # created_at reports UTC time, we use .astimezone() to convert to local time
-             formatted_time = created_time.astimezone().strftime("%m/%d/%Y, %H:%M:%S")
-             report_string += f" at {formatted_time}"
+         # created_at reports UTC time, we use .astimezone() to convert to local time
+         formatted_time = response.created_at.astimezone().strftime("%m/%d/%Y, %H:%M:%S")
+         report_string += f" at {formatted_time}"
          rprint(report_string)
      else:
          click.echo("No confirmation received, stopping job launch")
@@ -368,14 +350,14 @@ def list(ctx: click.Context) -> None:
 
      # Use a default datetime for None values to make sure the key function always returns a comparable value
      epoch_start = datetime.fromtimestamp(0, tz=timezone.utc)
-     response.data.sort(key=lambda x: parse_timestamp(x.created_at or "") or epoch_start)
+     response.data.sort(key=lambda x: x.created_at or epoch_start)
 
-     display_list = []
+     display_list: List[Dict[str, Any]] = []
      for i in response.data:
          display_list.append(
              {
                  "Fine-tune ID": i.id,
-                 "Model Output Name": "\n".join(wrap(i.output_name or "", width=30)),
+                 "Model Output Name": "\n".join(wrap(i.x_model_output_name or "", width=30)),
                  "Status": i.status,
                  "Created At": i.created_at,
                  "Price": f"""${
@@ -406,9 +388,7 @@ def retrieve(ctx: click.Context, fine_tune_id: str) -> None:
  @fine_tuning.command()
  @click.pass_context
  @click.argument("fine_tune_id", type=str, required=True)
- @click.option(
-     "--quiet", is_flag=True, help="Do not prompt for confirmation before cancelling job"
- )
+ @click.option("--quiet", is_flag=True, help="Do not prompt for confirmation before cancelling job")
  def cancel(ctx: click.Context, fine_tune_id: str, quiet: bool = False) -> None:
      """Cancel fine-tuning job"""
      client: Together = ctx.obj
@@ -422,7 +402,7 @@ def cancel(ctx: click.Context, fine_tune_id: str, quiet: bool = False) -> None:
          return
      response = client.fine_tuning.cancel(fine_tune_id)
 
-     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4))
+     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4, default=datetime_serializer))
 
 
  @fine_tuning.command()
@@ -436,13 +416,13 @@ def list_events(ctx: click.Context, fine_tune_id: str) -> None:
 
      response.data = response.data or []
 
-     display_list = []
+     display_list: List[Dict[str, Any]] = []
      for i in response.data:
          display_list.append(
              {
                  "Message": "\n".join(wrap(i.message or "", width=50)),
                  "Type": i.type,
-                 "Created At": parse_timestamp(i.created_at or ""),
+                 "Created At": i.created_at,
                  "Hash": i.hash,
              }
          )
@@ -460,13 +440,18 @@ def list_checkpoints(ctx: click.Context, fine_tune_id: str) -> None:
 
      checkpoints = client.fine_tuning.list_checkpoints(fine_tune_id)
 
-     display_list = []
-     for checkpoint in checkpoints:
+     display_list: List[Dict[str, Any]] = []
+     for checkpoint in checkpoints.data:
+         name = (
+             f"{fine_tune_id}:{checkpoint.step}"
+             if "intermediate" in checkpoint.checkpoint_type.lower()
+             else fine_tune_id
+         )
          display_list.append(
              {
-                 "Type": checkpoint.type,
-                 "Timestamp": format_timestamp(checkpoint.timestamp),
-                 "Name": checkpoint.name,
+                 "Type": checkpoint.checkpoint_type,
+                 "Timestamp": format_timestamp(checkpoint.created_at),
+                 "Name": name,
              }
          )
 
@@ -479,7 +464,7 @@ def list_checkpoints(ctx: click.Context, fine_tune_id: str) -> None:
          click.echo(f"No checkpoints found for job {fine_tune_id}")
 
 
- @fine_tuning.command()
+ @fine_tuning.command(name="download")
  @click.pass_context
  @click.argument("fine_tune_id", type=str, required=True)
  @click.option(
@@ -500,26 +485,82 @@ def list_checkpoints(ctx: click.Context, fine_tune_id: str) -> None:
  )
  @click.option(
      "--checkpoint-type",
-     type=DownloadCheckpointTypeChoice(),
+     type=click.Choice(["merged", "adapter", "default"]),
      required=False,
-     default=DownloadCheckpointType.DEFAULT.value,
+     default="merged",
      help="Specifies checkpoint type. 'merged' and 'adapter' options work only for LoRA jobs.",
  )
  def download(
      ctx: click.Context,
      fine_tune_id: str,
-     output_dir: str,
-     checkpoint_step: int | None,
-     checkpoint_type: DownloadCheckpointType,
+     output_dir: str | None = None,
+     checkpoint_step: Union[int, NotGiven] = NOT_GIVEN,
+     checkpoint_type: Literal["default", "merged", "adapter"] | NotGiven = NOT_GIVEN,
  ) -> None:
      """Download fine-tuning checkpoint"""
      client: Together = ctx.obj
 
-     response = client.fine_tuning.download(
-         fine_tune_id,
-         output=output_dir,
-         checkpoint_step=checkpoint_step,
-         checkpoint_type=checkpoint_type,
+     if re.match(_FT_JOB_WITH_STEP_REGEX, fine_tune_id) is not None:
+         if checkpoint_step is NOT_GIVEN:
+             checkpoint_step = int(fine_tune_id.split(":")[1])
+             fine_tune_id = fine_tune_id.split(":")[0]
+         else:
+             raise ValueError(
+                 "Fine-tuning job ID {fine_tune_id} contains a colon to specify the step to download, but `checkpoint_step` "
+                 "was also set. Remove one of the step specifiers to proceed."
+             )
+
+     ft_job = client.fine_tuning.retrieve(fine_tune_id)
+
+     loosely_typed_checkpoint_type: str | NotGiven = checkpoint_type
+     if isinstance(ft_job.training_type, TrainingTypeFullTrainingType):
+         if checkpoint_type != "default":
+             raise ValueError("Only DEFAULT checkpoint type is allowed for FullTrainingType")
+         loosely_typed_checkpoint_type = "model_output_path"
+     elif isinstance(ft_job.training_type, TrainingTypeLoRaTrainingType):
+         if checkpoint_type == "default":
+             loosely_typed_checkpoint_type = "merged"
+
+         if checkpoint_type not in {
+             "merged",
+             "adapter",
+         }:
+             raise ValueError(f"Invalid checkpoint type for LoRATrainingType: {checkpoint_type}")
+
+     remote_name = ft_job.x_model_output_name
+
+     url = f"/finetune/download?ft_id={fine_tune_id}&checkpoint={loosely_typed_checkpoint_type}"
+     output: Path | None = None
+     if isinstance(output_dir, str):
+         output = Path(output_dir)
+
+     file_path, file_size = DownloadManager(client).download(
+         url=url,
+         output=output,
+         remote_name=remote_name,
+         fetch_metadata=True,
      )
 
+     click.echo(json.dumps({"object": "local", "id": fine_tune_id, "filename": file_path, "size": file_size}, indent=4))
+
+
+ @fine_tuning.command()
+ @click.pass_context
+ @click.argument("fine_tune_id", type=str, required=True)
+ @click.option("--force", is_flag=True, help="Force deletion without confirmation")
+ @click.option("--quiet", is_flag=True, help="Do not prompt for confirmation before deleting job")
+ def delete(ctx: click.Context, fine_tune_id: str, force: bool = False, quiet: bool = False) -> None:
+     """Delete fine-tuning job"""
+     client: Together = ctx.obj
+
+     if not quiet:
+         confirm_response = input(
+             f"Are you sure you want to delete fine-tuning job {fine_tune_id}? This action cannot be undone. [y/N] "
+         )
+         if confirm_response.lower() != "y":
+             click.echo("Deletion cancelled")
+             return
+
+     response = client.fine_tuning.delete(fine_tune_id, force=force)
+
      click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4))
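
Two behavioral notes on the rewritten `download` command: the job argument may now carry the checkpoint step inline (the `{$JOB_ID}:{$STEP}` form printed by `list-checkpoints`), and the checkpoint is fetched through the new `DownloadManager` rather than a dedicated `fine_tuning.download` API. A hedged sketch of just the ID-parsing rule, using the regex copied from the hunk above (the `split_job_id` helper here is illustrative, not part of the SDK):

import re

_FT_JOB_WITH_STEP_REGEX = r"^ft-[\dabcdef-]+:\d+$"  # as defined in fine_tuning.py above

def split_job_id(fine_tune_id: str) -> tuple[str, int | None]:
    """Split 'ft-<id>:<step>' into the bare job ID and the optional checkpoint step."""
    if re.match(_FT_JOB_WITH_STEP_REGEX, fine_tune_id):
        job_id, step = fine_tune_id.split(":")
        return job_id, int(step)
    return fine_tune_id, None

assert split_job_id("ft-abc123-def:10") == ("ft-abc123-def", 10)
assert split_job_id("ft-abc123-def") == ("ft-abc123-def", None)
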
together/lib/cli/api/models.py (new file)
@@ -0,0 +1,140 @@
+ import json as json_lib
+ from typing import Any, Dict, List, Literal, Optional
+
+ import click
+ from tabulate import tabulate
+
+ from together import Together, omit
+ from together._models import BaseModel
+ from together._response import APIResponse as APIResponse
+ from together.lib.resources.models import filter_by_dedicated_models
+ from together.types.model_upload_response import ModelUploadResponse
+
+
+ @click.group()
+ @click.pass_context
+ def models(ctx: click.Context) -> None:
+     """Models API commands"""
+     pass
+
+
+ @models.command()
+ @click.option(
+     "--type",
+     type=click.Choice(["dedicated"]),
+     help="Filter models by type (dedicated: models that can be deployed as dedicated endpoints)",
+ )
+ @click.option(
+     "--json",
+     is_flag=True,
+     help="Output in JSON format",
+ )
+ @click.pass_context
+ def list(ctx: click.Context, type: Optional[str], json: bool) -> None:
+     """List models"""
+     client: Together = ctx.obj
+
+     response = client.models.list()
+     models_list = response
+
+     if type == "dedicated":
+         models_list = filter_by_dedicated_models(client, models_list)
+
+     display_list: List[Dict[str, Any]] = []
+     model: BaseModel
+     for model in models_list:
+         display_list.append(
+             {
+                 "ID": model.id,
+                 "Name": model.display_name,
+                 "Organization": model.organization,
+                 "Type": model.type,
+                 "Context Length": model.context_length,
+                 "License": model.license,
+                 "Input per 1M token": model.pricing.input if model.pricing else None,
+                 "Output per 1M token": model.pricing.output if model.pricing else None,
+             }
+         )
+
+     if json:
+         click.echo(json_lib.dumps(display_list, indent=2))
+     else:
+         click.echo(tabulate(display_list, headers="keys", tablefmt="plain"))
+
+
+ @models.command()
+ @click.option(
+     "--model-name",
+     required=True,
+     help="The name to give to your uploaded model",
+ )
+ @click.option(
+     "--model-source",
+     required=True,
+     help="The source location of the model (Hugging Face repo or S3 path)",
+ )
+ @click.option(
+     "--model-type",
+     type=click.Choice(["model", "adapter"]),
+     default="model",
+     help="Whether the model is a full model or an adapter",
+ )
+ @click.option(
+     "--hf-token",
+     help="Hugging Face token (if uploading from Hugging Face)",
+ )
+ @click.option(
+     "--description",
+     help="A description of your model",
+ )
+ @click.option(
+     "--base-model",
+     help="The base model to use for an adapter if setting it to run against a serverless pool. Only used for model_type 'adapter'.",
+ )
+ @click.option(
+     "--lora-model",
+     help="The lora pool to use for an adapter if setting it to run against, say, a dedicated pool. Only used for model_type 'adapter'.",
+ )
+ @click.option(
+     "--json",
+     is_flag=True,
+     help="Output in JSON format",
+ )
+ @click.pass_context
+ def upload(
+     ctx: click.Context,
+     model_name: str,
+     model_source: str,
+     hf_token: Optional[str],
+     description: Optional[str],
+     base_model: Optional[str],
+     lora_model: Optional[str],
+     json: bool,
+     model_type: Optional[Literal["model", "adapter"]] = "model",
+ ) -> None:
+     """Upload a custom model or adapter from Hugging Face or S3"""
+     client: Together = ctx.obj
+
+     response: ModelUploadResponse = client.models.upload(
+         model_name=model_name,
+         model_source=model_source,
+         model_type=model_type or omit,
+         hf_token=hf_token or omit,
+         description=description or omit,
+         base_model=base_model or omit,
+         lora_model=lora_model or omit,
+     )
+
+     if json:
+         click.echo(json_lib.dumps(response.model_dump(), indent=2))
+     else:
+         click.echo(f"Model upload job created successfully!")
+         if response.data.job_id:
+             click.echo(f"Job ID: {response.data.job_id}")
+         if response.data.x_model_name:
+             click.echo(f"Model Name: {response.data.x_model_name}")
+         if response.data.x_model_id:
+             click.echo(f"Model ID: {response.data.x_model_id}")
+         if response.data.x_model_source:
+             click.echo(f"Model Source: {response.data.x_model_source}")
+         click.echo(f"Message: {response.message}")
together/{cli → lib/cli}/api/utils.py
@@ -1,7 +1,8 @@
  from __future__ import annotations
 
- from gettext import gettext as _
  from typing import Literal
+ from gettext import gettext as _
+ from typing_extensions import override
 
  import click
 
@@ -10,6 +11,7 @@ class AutoIntParamType(click.ParamType):
      name = "integer_or_max"
      _number_class = int
 
+     @override
      def convert(
          self, value: str, param: click.Parameter | None, ctx: click.Context | None
      ) -> int | Literal["max"] | None:
@@ -19,9 +21,7 @@ class AutoIntParamType(click.ParamType):
              return int(value)
          except ValueError:
              self.fail(
-                 _("{value!r} is not a valid {number_type}.").format(
-                     value=value, number_type=self.name
-                 ),
+                 _("{value!r} is not a valid {number_type}.").format(value=value, number_type=self.name),
                  param,
                  ctx,
              )
@@ -30,6 +30,7 @@
  class BooleanWithAutoParamType(click.ParamType):
      name = "boolean_or_auto"
 
+     @override
      def convert(
          self, value: str, param: click.Parameter | None, ctx: click.Context | None
      ) -> bool | Literal["auto"] | None:
@@ -39,9 +40,7 @@ class BooleanWithAutoParamType(click.ParamType):
              return bool(value)
          except ValueError:
              self.fail(
-                 _("{value!r} is not a valid {type}.").format(
-                     value=value, type=self.name
-                 ),
+                 _("{value!r} is not a valid {type}.").format(value=value, type=self.name),
                  param,
                  ctx,
              )
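
`INT_WITH_MAX` and `BOOL_WITH_AUTO`, presumably the module-level instances of the two param types above, let a click option accept either a typed value or a literal sentinel; that is how `--batch-size` defaults to "max" in the fine-tuning command earlier in this diff. A usage sketch (the `--batch-size` pairing is taken from this diff; wiring `--train-on-inputs` to `BOOL_WITH_AUTO` is an assumption based on its `bool | Literal["auto"]` signature):

import click

from together.lib.cli.api.utils import INT_WITH_MAX, BOOL_WITH_AUTO

@click.command()
@click.option("--batch-size", type=INT_WITH_MAX, default="max")          # int or the literal "max"
@click.option("--train-on-inputs", type=BOOL_WITH_AUTO, default="auto")  # bool or the literal "auto" (assumed pairing)
def demo(batch_size: int | str, train_on_inputs: bool | str) -> None:
    click.echo(f"batch_size={batch_size!r} train_on_inputs={train_on_inputs!r}")

if __name__ == "__main__":
    demo()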