together 1.5.34__py3-none-any.whl → 2.0.0a6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (208)
  1. together/__init__.py +101 -114
  2. together/_base_client.py +1995 -0
  3. together/_client.py +1033 -0
  4. together/_compat.py +219 -0
  5. together/_constants.py +14 -0
  6. together/_exceptions.py +108 -0
  7. together/_files.py +123 -0
  8. together/_models.py +857 -0
  9. together/_qs.py +150 -0
  10. together/_resource.py +43 -0
  11. together/_response.py +830 -0
  12. together/_streaming.py +370 -0
  13. together/_types.py +260 -0
  14. together/_utils/__init__.py +64 -0
  15. together/_utils/_compat.py +45 -0
  16. together/_utils/_datetime_parse.py +136 -0
  17. together/_utils/_logs.py +25 -0
  18. together/_utils/_proxy.py +65 -0
  19. together/_utils/_reflection.py +42 -0
  20. together/_utils/_resources_proxy.py +24 -0
  21. together/_utils/_streams.py +12 -0
  22. together/_utils/_sync.py +58 -0
  23. together/_utils/_transform.py +457 -0
  24. together/_utils/_typing.py +156 -0
  25. together/_utils/_utils.py +421 -0
  26. together/_version.py +4 -0
  27. together/lib/.keep +4 -0
  28. together/lib/__init__.py +23 -0
  29. together/{cli → lib/cli}/api/endpoints.py +65 -81
  30. together/{cli/api/evaluation.py → lib/cli/api/evals.py} +152 -43
  31. together/{cli → lib/cli}/api/files.py +20 -17
  32. together/{cli/api/finetune.py → lib/cli/api/fine_tuning.py} +116 -172
  33. together/{cli → lib/cli}/api/models.py +34 -27
  34. together/lib/cli/api/utils.py +50 -0
  35. together/{cli → lib/cli}/cli.py +16 -26
  36. together/{constants.py → lib/constants.py} +11 -24
  37. together/lib/resources/__init__.py +11 -0
  38. together/lib/resources/files.py +999 -0
  39. together/lib/resources/fine_tuning.py +280 -0
  40. together/lib/resources/models.py +35 -0
  41. together/lib/types/__init__.py +13 -0
  42. together/lib/types/error.py +9 -0
  43. together/lib/types/fine_tuning.py +397 -0
  44. together/{utils → lib/utils}/__init__.py +6 -14
  45. together/{utils → lib/utils}/_log.py +11 -16
  46. together/{utils → lib/utils}/files.py +90 -288
  47. together/lib/utils/serializer.py +10 -0
  48. together/{utils → lib/utils}/tools.py +19 -55
  49. together/resources/__init__.py +225 -39
  50. together/resources/audio/__init__.py +72 -48
  51. together/resources/audio/audio.py +198 -0
  52. together/resources/audio/speech.py +574 -128
  53. together/resources/audio/transcriptions.py +247 -261
  54. together/resources/audio/translations.py +221 -241
  55. together/resources/audio/voices.py +111 -41
  56. together/resources/batches.py +417 -0
  57. together/resources/chat/__init__.py +30 -21
  58. together/resources/chat/chat.py +102 -0
  59. together/resources/chat/completions.py +1063 -263
  60. together/resources/code_interpreter/__init__.py +33 -0
  61. together/resources/code_interpreter/code_interpreter.py +258 -0
  62. together/resources/code_interpreter/sessions.py +135 -0
  63. together/resources/completions.py +884 -225
  64. together/resources/embeddings.py +172 -68
  65. together/resources/endpoints.py +589 -477
  66. together/resources/evals.py +452 -0
  67. together/resources/files.py +397 -129
  68. together/resources/fine_tuning.py +1033 -0
  69. together/resources/hardware.py +181 -0
  70. together/resources/images.py +258 -104
  71. together/resources/jobs.py +214 -0
  72. together/resources/models.py +223 -193
  73. together/resources/rerank.py +190 -92
  74. together/resources/videos.py +286 -214
  75. together/types/__init__.py +66 -167
  76. together/types/audio/__init__.py +10 -0
  77. together/types/audio/speech_create_params.py +75 -0
  78. together/types/audio/transcription_create_params.py +54 -0
  79. together/types/audio/transcription_create_response.py +111 -0
  80. together/types/audio/translation_create_params.py +40 -0
  81. together/types/audio/translation_create_response.py +70 -0
  82. together/types/audio/voice_list_response.py +23 -0
  83. together/types/audio_speech_stream_chunk.py +16 -0
  84. together/types/autoscaling.py +13 -0
  85. together/types/autoscaling_param.py +15 -0
  86. together/types/batch_create_params.py +24 -0
  87. together/types/batch_create_response.py +14 -0
  88. together/types/batch_job.py +45 -0
  89. together/types/batch_list_response.py +10 -0
  90. together/types/chat/__init__.py +18 -0
  91. together/types/chat/chat_completion.py +60 -0
  92. together/types/chat/chat_completion_chunk.py +61 -0
  93. together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
  94. together/types/chat/chat_completion_structured_message_text_param.py +13 -0
  95. together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
  96. together/types/chat/chat_completion_usage.py +13 -0
  97. together/types/chat/chat_completion_warning.py +9 -0
  98. together/types/chat/completion_create_params.py +329 -0
  99. together/types/code_interpreter/__init__.py +5 -0
  100. together/types/code_interpreter/session_list_response.py +31 -0
  101. together/types/code_interpreter_execute_params.py +45 -0
  102. together/types/completion.py +42 -0
  103. together/types/completion_chunk.py +66 -0
  104. together/types/completion_create_params.py +138 -0
  105. together/types/dedicated_endpoint.py +44 -0
  106. together/types/embedding.py +24 -0
  107. together/types/embedding_create_params.py +31 -0
  108. together/types/endpoint_create_params.py +43 -0
  109. together/types/endpoint_list_avzones_response.py +11 -0
  110. together/types/endpoint_list_params.py +18 -0
  111. together/types/endpoint_list_response.py +41 -0
  112. together/types/endpoint_update_params.py +27 -0
  113. together/types/eval_create_params.py +263 -0
  114. together/types/eval_create_response.py +16 -0
  115. together/types/eval_list_params.py +21 -0
  116. together/types/eval_list_response.py +10 -0
  117. together/types/eval_status_response.py +100 -0
  118. together/types/evaluation_job.py +139 -0
  119. together/types/execute_response.py +108 -0
  120. together/types/file_delete_response.py +13 -0
  121. together/types/file_list.py +12 -0
  122. together/types/file_purpose.py +9 -0
  123. together/types/file_response.py +31 -0
  124. together/types/file_type.py +7 -0
  125. together/types/fine_tuning_cancel_response.py +194 -0
  126. together/types/fine_tuning_content_params.py +24 -0
  127. together/types/fine_tuning_delete_params.py +11 -0
  128. together/types/fine_tuning_delete_response.py +12 -0
  129. together/types/fine_tuning_list_checkpoints_response.py +21 -0
  130. together/types/fine_tuning_list_events_response.py +12 -0
  131. together/types/fine_tuning_list_response.py +199 -0
  132. together/types/finetune_event.py +41 -0
  133. together/types/finetune_event_type.py +33 -0
  134. together/types/finetune_response.py +177 -0
  135. together/types/hardware_list_params.py +16 -0
  136. together/types/hardware_list_response.py +58 -0
  137. together/types/image_data_b64.py +15 -0
  138. together/types/image_data_url.py +15 -0
  139. together/types/image_file.py +23 -0
  140. together/types/image_generate_params.py +85 -0
  141. together/types/job_list_response.py +47 -0
  142. together/types/job_retrieve_response.py +43 -0
  143. together/types/log_probs.py +18 -0
  144. together/types/model_list_response.py +10 -0
  145. together/types/model_object.py +42 -0
  146. together/types/model_upload_params.py +36 -0
  147. together/types/model_upload_response.py +23 -0
  148. together/types/rerank_create_params.py +36 -0
  149. together/types/rerank_create_response.py +36 -0
  150. together/types/tool_choice.py +23 -0
  151. together/types/tool_choice_param.py +23 -0
  152. together/types/tools_param.py +23 -0
  153. together/types/training_method_dpo.py +22 -0
  154. together/types/training_method_sft.py +18 -0
  155. together/types/video_create_params.py +86 -0
  156. together/types/video_create_response.py +10 -0
  157. together/types/video_job.py +57 -0
  158. together-2.0.0a6.dist-info/METADATA +729 -0
  159. together-2.0.0a6.dist-info/RECORD +165 -0
  160. {together-1.5.34.dist-info → together-2.0.0a6.dist-info}/WHEEL +1 -1
  161. together-2.0.0a6.dist-info/entry_points.txt +2 -0
  162. {together-1.5.34.dist-info → together-2.0.0a6.dist-info}/licenses/LICENSE +1 -1
  163. together/abstract/api_requestor.py +0 -770
  164. together/cli/api/chat.py +0 -298
  165. together/cli/api/completions.py +0 -119
  166. together/cli/api/images.py +0 -93
  167. together/cli/api/utils.py +0 -139
  168. together/client.py +0 -186
  169. together/error.py +0 -194
  170. together/filemanager.py +0 -635
  171. together/legacy/__init__.py +0 -0
  172. together/legacy/base.py +0 -27
  173. together/legacy/complete.py +0 -93
  174. together/legacy/embeddings.py +0 -27
  175. together/legacy/files.py +0 -146
  176. together/legacy/finetune.py +0 -177
  177. together/legacy/images.py +0 -27
  178. together/legacy/models.py +0 -44
  179. together/resources/batch.py +0 -165
  180. together/resources/code_interpreter.py +0 -82
  181. together/resources/evaluation.py +0 -808
  182. together/resources/finetune.py +0 -1388
  183. together/together_response.py +0 -50
  184. together/types/abstract.py +0 -26
  185. together/types/audio_speech.py +0 -311
  186. together/types/batch.py +0 -54
  187. together/types/chat_completions.py +0 -210
  188. together/types/code_interpreter.py +0 -57
  189. together/types/common.py +0 -67
  190. together/types/completions.py +0 -107
  191. together/types/embeddings.py +0 -35
  192. together/types/endpoints.py +0 -123
  193. together/types/error.py +0 -16
  194. together/types/evaluation.py +0 -93
  195. together/types/files.py +0 -93
  196. together/types/finetune.py +0 -464
  197. together/types/images.py +0 -42
  198. together/types/models.py +0 -96
  199. together/types/rerank.py +0 -43
  200. together/types/videos.py +0 -69
  201. together/utils/api_helpers.py +0 -124
  202. together/version.py +0 -6
  203. together-1.5.34.dist-info/METADATA +0 -583
  204. together-1.5.34.dist-info/RECORD +0 -77
  205. together-1.5.34.dist-info/entry_points.txt +0 -3
  206. /together/{abstract → lib/cli}/__init__.py +0 -0
  207. /together/{cli → lib/cli/api}/__init__.py +0 -0
  208. /together/{cli/api/__init__.py → py.typed} +0 -0
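
Reading the diff: 2.0.0a6 replaces the hand-written client layer (`together/client.py`, `together/abstract/api_requestor.py`) with a generated core (`together/_client.py`, `together/_base_client.py`), moves the CLI and helper code under `together.lib`, and regenerates the request/response types under `together/types/`. The top-level `from together import Together` entry point is unchanged, as the hunks below show. A minimal usage sketch consistent with both sides of the diff (the model name and file ID are placeholders):

    from together import Together

    client = Together()  # reads TOGETHER_API_KEY from the environment

    # Submit a fine-tuning job, then inspect it (IDs and names are hypothetical)
    job = client.fine_tuning.create(model="example/base-model", training_file="file-0000")
    print(client.fine_tuning.retrieve(job.id).status)

The hunks below cover `together/lib/cli/api/fine_tuning.py`, `together/lib/cli/api/models.py`, and the new `together/lib/cli/api/utils.py`.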
@@ -1,54 +1,37 @@
  from __future__ import annotations
 
+ import re
  import json
+ from typing import Any, Dict, List, Union, Literal
+ from pathlib import Path
  from datetime import datetime, timezone
  from textwrap import wrap
- from typing import Any, Literal
 
  import click
- from click.core import ParameterSource  # type: ignore[attr-defined]
  from rich import print as rprint
- from rich.json import JSON
  from tabulate import tabulate
+ from click.core import ParameterSource  # type: ignore[attr-defined]
 
  from together import Together
- from together.cli.api.utils import BOOL_WITH_AUTO, INT_WITH_MAX, generate_progress_bar
- from together.types.finetune import DownloadCheckpointType, FinetuneTrainingLimits
- from together.utils import (
-     finetune_price_to_dollars,
-     format_timestamp,
-     log_warn,
-     parse_timestamp,
- )
-
+ from together._types import NOT_GIVEN, NotGiven
+ from together.lib.utils import log_warn
+ from together.lib.utils.tools import format_timestamp, finetune_price_to_dollars
+ from together.lib.cli.api.utils import INT_WITH_MAX, BOOL_WITH_AUTO
+ from together.lib.resources.files import DownloadManager
+ from together.lib.utils.serializer import datetime_serializer
+ from together.types.finetune_response import TrainingTypeFullTrainingType, TrainingTypeLoRaTrainingType
+ from together.lib.resources.fine_tuning import get_model_limits
 
  _CONFIRMATION_MESSAGE = (
      "You are about to create a fine-tuning job. "
-     "The estimated price of this job is {price}. "
-     "The actual cost of your job will be determined by the model size, the number of tokens "
+     "The cost of your job will be determined by the model size, the number of tokens "
      "in the training file, the number of tokens in the validation file, the number of epochs, and "
-     "the number of evaluations. Visit https://www.together.ai/pricing to learn more about fine-tuning pricing.\n"
-     "{warning}"
+     "the number of evaluations. Visit https://www.together.ai/pricing to get a price estimate.\n"
      "You can pass `-y` or `--confirm` to your command to skip this message.\n\n"
      "Do you want to proceed?"
  )
 
- _WARNING_MESSAGE_INSUFFICIENT_FUNDS = (
-     "The estimated price of this job is significantly greater than your current credit limit and balance combined. "
-     "It will likely get cancelled due to insufficient funds. "
-     "Consider increasing your credit limit at https://api.together.xyz/settings/profile\n"
- )
-
-
- class DownloadCheckpointTypeChoice(click.Choice):
-     def __init__(self) -> None:
-         super().__init__([ct.value for ct in DownloadCheckpointType])
-
-     def convert(
-         self, value: str, param: click.Parameter | None, ctx: click.Context | None
-     ) -> DownloadCheckpointType:
-         value = super().convert(value, param, ctx)
-         return DownloadCheckpointType(value)
+ _FT_JOB_WITH_STEP_REGEX = r"^ft-[\dabcdef-]+:\d+$"
 
 
  @click.group(name="fine-tuning")
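
The new `_FT_JOB_WITH_STEP_REGEX` recognizes job IDs carrying a `:step` suffix; the rewritten `download` command further down uses it to split an ID into a bare job ID and a checkpoint step. A quick illustration of what the pattern matches (the IDs here are made up):

    import re

    _FT_JOB_WITH_STEP_REGEX = r"^ft-[\dabcdef-]+:\d+$"

    assert re.match(_FT_JOB_WITH_STEP_REGEX, "ft-3b21-4c5d:100")  # ID plus step suffix
    assert not re.match(_FT_JOB_WITH_STEP_REGEX, "ft-3b21-4c5d")  # bare ID: no match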
@@ -68,19 +51,11 @@ def fine_tuning(ctx: click.Context) -> None:
      help="Training file ID from Files API",
  )
  @click.option("--model", "-m", type=str, help="Base model name")
- @click.option(
-     "--n-epochs", "-ne", type=int, default=1, help="Number of epochs to train for"
- )
- @click.option(
-     "--validation-file", type=str, default="", help="Validation file ID from Files API"
- )
+ @click.option("--n-epochs", "-ne", type=int, default=1, help="Number of epochs to train for")
+ @click.option("--validation-file", type=str, default="", help="Validation file ID from Files API")
  @click.option("--n-evals", type=int, default=0, help="Number of evaluation loops")
- @click.option(
-     "--n-checkpoints", "-c", type=int, default=1, help="Number of checkpoints to save"
- )
- @click.option(
-     "--batch-size", "-b", type=INT_WITH_MAX, default="max", help="Train batch size"
- )
+ @click.option("--n-checkpoints", "-c", type=int, default=1, help="Number of checkpoints to save")
+ @click.option("--batch-size", "-b", type=INT_WITH_MAX, default="max", help="Train batch size")
  @click.option("--learning-rate", "-lr", type=float, default=1e-5, help="Learning rate")
  @click.option(
      "--lr-scheduler-type",
@@ -149,18 +124,14 @@ def fine_tuning(ctx: click.Context) -> None:
      "--dpo-normalize-logratios-by-length",
      type=bool,
      default=False,
-     help=(
-         "Whether to normalize logratios by sample length "
-         "(only used when '--training-method' is 'dpo')"
-     ),
+     help=("Whether to normalize logratios by sample length (only used when '--training-method' is 'dpo')"),
  )
  @click.option(
      "--rpo-alpha",
      type=float,
      default=None,
      help=(
-         "RPO alpha parameter of DPO training to include NLL in the loss "
-         "(only used when '--training-method' is 'dpo')"
+         "RPO alpha parameter of DPO training to include NLL in the loss (only used when '--training-method' is 'dpo')"
      ),
  )
  @click.option(
@@ -195,12 +166,6 @@ def fine_tuning(ctx: click.Context) -> None:
      help="Whether to mask the user messages in conversational data or prompts in instruction data. "
      "`auto` will automatically determine whether to mask the inputs based on the data format.",
  )
- @click.option(
-     "--train-vision",
-     type=bool,
-     default=False,
-     help="Whether to train the vision encoder. Only supported for multimodal models.",
- )
  @click.option(
      "--from-checkpoint",
      type=str,
@@ -239,7 +204,7 @@ def create(
      ctx: click.Context,
      training_file: str,
      validation_file: str,
-     model: str,
+     model: str | None,
      n_epochs: int,
      n_evals: int,
      n_checkpoints: int,
@@ -251,27 +216,26 @@ def create(
      warmup_ratio: float,
      max_grad_norm: float,
      weight_decay: float,
-     lora: bool,
-     lora_r: int,
-     lora_dropout: float,
-     lora_alpha: float,
-     lora_trainable_modules: str,
-     train_vision: bool,
-     suffix: str,
-     wandb_api_key: str,
-     wandb_base_url: str,
-     wandb_project_name: str,
-     wandb_name: str,
-     confirm: bool,
-     train_on_inputs: bool | Literal["auto"],
-     training_method: str,
+     lora: bool | None,
+     lora_r: int | None,
+     lora_dropout: float | None,
+     lora_alpha: float | None,
+     lora_trainable_modules: str | None,
+     suffix: str | None,
+     wandb_api_key: str | None,
+     wandb_base_url: str | None,
+     wandb_project_name: str | None,
+     wandb_name: str | None,
+     confirm: bool | None,
+     train_on_inputs: bool | Literal["auto"] | None,
+     training_method: str | None,
      dpo_beta: float | None,
-     dpo_normalize_logratios_by_length: bool,
+     dpo_normalize_logratios_by_length: bool | None,
      rpo_alpha: float | None,
      simpo_gamma: float | None,
-     from_checkpoint: str,
-     from_hf_model: str,
-     hf_model_revision: str,
+     from_checkpoint: str | None,
+     from_hf_model: str | None,
+     hf_model_revision: str | None,
      hf_api_token: str | None,
      hf_output_repo_name: str | None,
  ) -> None:
@@ -298,7 +262,6 @@ def create(
          lora_dropout=lora_dropout,
          lora_alpha=lora_alpha,
          lora_trainable_modules=lora_trainable_modules,
-         train_vision=train_vision,
          suffix=suffix,
          wandb_api_key=wandb_api_key,
          wandb_base_url=wandb_base_url,
@@ -324,15 +287,11 @@ def create(
      if from_checkpoint is not None:
          model_name = from_checkpoint.split(":")[0]
 
-     model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits(
-         model=model_name,
-     )
+     model_limits = get_model_limits(client, str(model_name))
 
      if lora:
          if model_limits.lora_training is None:
-             raise click.BadParameter(
-                 f"LoRA fine-tuning is not supported for the model `{model}`"
-             )
+             raise click.BadParameter(f"LoRA fine-tuning is not supported for the model `{model}`")
          default_values = {
              "lora_r": model_limits.lora_training.max_rank,
              "learning_rate": 1e-3,
@@ -341,15 +300,13 @@ def create(
          for arg in default_values:
              arg_source = ctx.get_parameter_source("arg")  # type: ignore[attr-defined]
              if arg_source == ParameterSource.DEFAULT:
-                 training_args[arg] = default_values[arg_source]
+                 training_args[arg] = default_values[str(arg_source)]
 
          if ctx.get_parameter_source("lora_alpha") == ParameterSource.DEFAULT:  # type: ignore[attr-defined]
              training_args["lora_alpha"] = training_args["lora_r"] * 2
      else:
          if model_limits.full_training is None:
-             raise click.BadParameter(
-                 f"Full fine-tuning is not supported for the model `{model}`"
-             )
+             raise click.BadParameter(f"Full fine-tuning is not supported for the model `{model}`")
 
          for param in ["lora_r", "lora_dropout", "lora_alpha", "lora_trainable_modules"]:
              param_source = ctx.get_parameter_source(param)  # type: ignore[attr-defined]
@@ -364,52 +321,18 @@ def create(
              "Warning: You have specified a validation file but the number of evaluation loops is set to 0. No evaluations will be performed."
          )
      elif n_evals > 0 and not validation_file:
-         raise click.BadParameter(
-             "You have specified a number of evaluation loops but no validation file."
-         )
+         raise click.BadParameter("You have specified a number of evaluation loops but no validation file.")
 
-     if model_limits.supports_vision:
-         # Don't show price estimation for multimodal models yet
-         confirm = True
-
-     finetune_price_estimation_result = client.fine_tuning.estimate_price(
-         training_file=training_file,
-         validation_file=validation_file,
-         model=model,
-         n_epochs=n_epochs,
-         n_evals=n_evals,
-         training_type="lora" if lora else "full",
-         training_method=training_method,
-     )
-
-     price = click.style(
-         f"${finetune_price_estimation_result.estimated_total_price:.2f}",
-         bold=True,
-     )
-
-     if not finetune_price_estimation_result.allowed_to_proceed:
-         warning = click.style(_WARNING_MESSAGE_INSUFFICIENT_FUNDS, fg="red", bold=True)
-     else:
-         warning = ""
-
-     confirmation_message = _CONFIRMATION_MESSAGE.format(
-         price=price,
-         warning=warning,
-     )
-
-     if confirm or click.confirm(confirmation_message, default=True, show_default=True):
+     if confirm or click.confirm(_CONFIRMATION_MESSAGE, default=True, show_default=True):
          response = client.fine_tuning.create(
              **training_args,
              verbose=True,
          )
+
          report_string = f"Successfully submitted a fine-tuning job {response.id}"
-         if response.created_at is not None:
-             created_time = datetime.strptime(
-                 response.created_at, "%Y-%m-%dT%H:%M:%S.%f%z"
-             )
-             # created_at reports UTC time, we use .astimezone() to convert to local time
-             formatted_time = created_time.astimezone().strftime("%m/%d/%Y, %H:%M:%S")
-             report_string += f" at {formatted_time}"
+         # created_at reports UTC time, we use .astimezone() to convert to local time
+         formatted_time = response.created_at.astimezone().strftime("%m/%d/%Y, %H:%M:%S")
+         report_string += f" at {formatted_time}"
          rprint(report_string)
      else:
          click.echo("No confirmation received, stopping job launch")
@@ -427,22 +350,19 @@ def list(ctx: click.Context) -> None:
 
      # Use a default datetime for None values to make sure the key function always returns a comparable value
      epoch_start = datetime.fromtimestamp(0, tz=timezone.utc)
-     response.data.sort(key=lambda x: parse_timestamp(x.created_at or "") or epoch_start)
+     response.data.sort(key=lambda x: x.created_at or epoch_start)
 
-     display_list = []
+     display_list: List[Dict[str, Any]] = []
      for i in response.data:
          display_list.append(
              {
                  "Fine-tune ID": i.id,
-                 "Model Output Name": "\n".join(wrap(i.output_name or "", width=30)),
+                 "Model Output Name": "\n".join(wrap(i.x_model_output_name or "", width=30)),
                  "Status": i.status,
                  "Created At": i.created_at,
                  "Price": f"""${
                      finetune_price_to_dollars(float(str(i.total_price)))
                  }""",  # convert to string for mypy typing
-                 "Progress": generate_progress_bar(
-                     i, datetime.now().astimezone(), use_rich=False
-                 ),
              }
          )
      table = tabulate(display_list, headers="keys", tablefmt="grid", showindex=True)
@@ -462,23 +382,13 @@ def retrieve(ctx: click.Context, fine_tune_id: str) -> None:
      # remove events from response for cleaner output
      response.events = None
 
-     rprint(JSON.from_data(response.model_dump(exclude_none=True)))
-     progress_text = generate_progress_bar(
-         response, datetime.now().astimezone(), use_rich=True
-     )
-     status = "Unknown"
-     if response.status is not None:
-         status = response.status.value
-     prefix = f"Status: [bold]{status}[/bold],"
-     rprint(f"{prefix} {progress_text}")
+     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4))
 
 
  @fine_tuning.command()
  @click.pass_context
  @click.argument("fine_tune_id", type=str, required=True)
- @click.option(
-     "--quiet", is_flag=True, help="Do not prompt for confirmation before cancelling job"
- )
+ @click.option("--quiet", is_flag=True, help="Do not prompt for confirmation before cancelling job")
  def cancel(ctx: click.Context, fine_tune_id: str, quiet: bool = False) -> None:
      """Cancel fine-tuning job"""
      client: Together = ctx.obj
@@ -492,7 +402,7 @@ def cancel(ctx: click.Context, fine_tune_id: str, quiet: bool = False) -> None:
          return
      response = client.fine_tuning.cancel(fine_tune_id)
 
-     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4))
+     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4, default=datetime_serializer))
 
 
  @fine_tuning.command()
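
`cancel` now passes `default=datetime_serializer` because the 2.x response models carry real `datetime` fields, which `json.dumps` cannot encode on its own. The actual implementation lives in `together/lib/utils/serializer.py` (10 lines, not shown in this diff); a typical `default=` hook of this shape looks like:

    from datetime import datetime
    from typing import Any

    def datetime_serializer(obj: Any) -> str:
        # json.dumps calls this for values it cannot encode natively
        if isinstance(obj, datetime):
            return obj.isoformat()
        raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")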
@@ -506,13 +416,13 @@ def list_events(ctx: click.Context, fine_tune_id: str) -> None:
 
      response.data = response.data or []
 
-     display_list = []
+     display_list: List[Dict[str, Any]] = []
      for i in response.data:
          display_list.append(
              {
                  "Message": "\n".join(wrap(i.message or "", width=50)),
                  "Type": i.type,
-                 "Created At": parse_timestamp(i.created_at or ""),
+                 "Created At": i.created_at,
                  "Hash": i.hash,
              }
          )
@@ -530,13 +440,18 @@ def list_checkpoints(ctx: click.Context, fine_tune_id: str) -> None:
 
      checkpoints = client.fine_tuning.list_checkpoints(fine_tune_id)
 
-     display_list = []
-     for checkpoint in checkpoints:
+     display_list: List[Dict[str, Any]] = []
+     for checkpoint in checkpoints.data:
+         name = (
+             f"{fine_tune_id}:{checkpoint.step}"
+             if "intermediate" in checkpoint.checkpoint_type.lower()
+             else fine_tune_id
+         )
          display_list.append(
              {
-                 "Type": checkpoint.type,
-                 "Timestamp": format_timestamp(checkpoint.timestamp),
-                 "Name": checkpoint.name,
+                 "Type": checkpoint.checkpoint_type,
+                 "Timestamp": format_timestamp(checkpoint.created_at),
+                 "Name": name,
              }
          )
 
@@ -549,7 +464,7 @@ def list_checkpoints(ctx: click.Context, fine_tune_id: str) -> None:
      click.echo(f"No checkpoints found for job {fine_tune_id}")
 
 
- @fine_tuning.command()
+ @fine_tuning.command(name="download")
  @click.pass_context
  @click.argument("fine_tune_id", type=str, required=True)
  @click.option(
@@ -570,48 +485,77 @@ def list_checkpoints(ctx: click.Context, fine_tune_id: str) -> None:
  )
  @click.option(
      "--checkpoint-type",
-     type=DownloadCheckpointTypeChoice(),
+     type=click.Choice(["merged", "adapter", "default"]),
      required=False,
-     default=DownloadCheckpointType.DEFAULT.value,
+     default="merged",
      help="Specifies checkpoint type. 'merged' and 'adapter' options work only for LoRA jobs.",
  )
  def download(
      ctx: click.Context,
      fine_tune_id: str,
-     output_dir: str,
-     checkpoint_step: int | None,
-     checkpoint_type: DownloadCheckpointType,
+     output_dir: str | None = None,
+     checkpoint_step: Union[int, NotGiven] = NOT_GIVEN,
+     checkpoint_type: Literal["default", "merged", "adapter"] | NotGiven = NOT_GIVEN,
  ) -> None:
      """Download fine-tuning checkpoint"""
      client: Together = ctx.obj
 
-     response = client.fine_tuning.download(
-         fine_tune_id,
-         output=output_dir,
-         checkpoint_step=checkpoint_step,
-         checkpoint_type=checkpoint_type,
+     if re.match(_FT_JOB_WITH_STEP_REGEX, fine_tune_id) is not None:
+         if checkpoint_step is NOT_GIVEN:
+             checkpoint_step = int(fine_tune_id.split(":")[1])
+             fine_tune_id = fine_tune_id.split(":")[0]
+         else:
+             raise ValueError(
+                 "Fine-tuning job ID {fine_tune_id} contains a colon to specify the step to download, but `checkpoint_step` "
+                 "was also set. Remove one of the step specifiers to proceed."
+             )
+
+     ft_job = client.fine_tuning.retrieve(fine_tune_id)
+
+     loosely_typed_checkpoint_type: str | NotGiven = checkpoint_type
+     if isinstance(ft_job.training_type, TrainingTypeFullTrainingType):
+         if checkpoint_type != "default":
+             raise ValueError("Only DEFAULT checkpoint type is allowed for FullTrainingType")
+         loosely_typed_checkpoint_type = "model_output_path"
+     elif isinstance(ft_job.training_type, TrainingTypeLoRaTrainingType):
+         if checkpoint_type == "default":
+             loosely_typed_checkpoint_type = "merged"
+
+         if checkpoint_type not in {
+             "merged",
+             "adapter",
+         }:
+             raise ValueError(f"Invalid checkpoint type for LoRATrainingType: {checkpoint_type}")
+
+     remote_name = ft_job.x_model_output_name
+
+     url = f"/finetune/download?ft_id={fine_tune_id}&checkpoint={loosely_typed_checkpoint_type}"
+     output: Path | None = None
+     if isinstance(output_dir, str):
+         output = Path(output_dir)
+
+     file_path, file_size = DownloadManager(client).download(
+         url=url,
+         output=output,
+         remote_name=remote_name,
+         fetch_metadata=True,
      )
 
-     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4))
+     click.echo(json.dumps({"object": "local", "id": fine_tune_id, "filename": file_path, "size": file_size}, indent=4))
 
 
  @fine_tuning.command()
  @click.pass_context
  @click.argument("fine_tune_id", type=str, required=True)
  @click.option("--force", is_flag=True, help="Force deletion without confirmation")
- @click.option(
-     "--quiet", is_flag=True, help="Do not prompt for confirmation before deleting job"
- )
- def delete(
-     ctx: click.Context, fine_tune_id: str, force: bool = False, quiet: bool = False
- ) -> None:
+ @click.option("--quiet", is_flag=True, help="Do not prompt for confirmation before deleting job")
+ def delete(ctx: click.Context, fine_tune_id: str, force: bool = False, quiet: bool = False) -> None:
      """Delete fine-tuning job"""
      client: Together = ctx.obj
 
      if not quiet:
          confirm_response = input(
-             f"Are you sure you want to delete fine-tuning job {fine_tune_id}? "
-             "This action cannot be undone. [y/N] "
+             f"Are you sure you want to delete fine-tuning job {fine_tune_id}? This action cannot be undone. [y/N] "
          )
          if confirm_response.lower() != "y":
              click.echo("Deletion cancelled")
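
Net effect of the new `download` flow: the checkpoint step may come from either the `:step` suffix on the job ID or the `--checkpoint-step` flag, but not both. The resolution logic in isolation (a sketch mirroring the hunk above, with a plain `None` standing in for the SDK's `NOT_GIVEN` sentinel; the ID is made up):

    import re

    _FT_JOB_WITH_STEP_REGEX = r"^ft-[\dabcdef-]+:\d+$"

    def resolve_step(fine_tune_id: str, checkpoint_step: int | None) -> tuple[str, int | None]:
        if re.match(_FT_JOB_WITH_STEP_REGEX, fine_tune_id) is not None:
            if checkpoint_step is not None:
                raise ValueError("step given both in the ID suffix and via --checkpoint-step")
            job_id, step = fine_tune_id.split(":")
            return job_id, int(step)
        return fine_tune_id, checkpoint_step

    assert resolve_step("ft-ab12-cd34:7", None) == ("ft-ab12-cd34", 7)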
@@ -1,10 +1,14 @@
  import json as json_lib
+ from typing import Any, Dict, List, Literal, Optional
 
  import click
  from tabulate import tabulate
 
- from together import Together
- from together.types.models import ModelObject, ModelUploadResponse
+ from together import Together, omit
+ from together._models import BaseModel
+ from together._response import APIResponse as APIResponse
+ from together.lib.resources.models import filter_by_dedicated_models
+ from together.types.model_upload_response import ModelUploadResponse
 
 
  @click.group()
@@ -26,16 +30,19 @@ def models(ctx: click.Context) -> None:
      help="Output in JSON format",
  )
  @click.pass_context
- def list(ctx: click.Context, type: str | None, json: bool) -> None:
+ def list(ctx: click.Context, type: Optional[str], json: bool) -> None:
      """List models"""
      client: Together = ctx.obj
 
-     response = client.models.list(dedicated=(type == "dedicated"))
+     response = client.models.list()
+     models_list = response
 
-     display_list = []
+     if type == "dedicated":
+         models_list = filter_by_dedicated_models(client, models_list)
 
-     model: ModelObject
-     for model in response:
+     display_list: List[Dict[str, Any]] = []
+     model: BaseModel
+     for model in models_list:
          display_list.append(
              {
                  "ID": model.id,
@@ -44,8 +51,8 @@ def list(ctx: click.Context, type: str | None, json: bool) -> None:
                  "Type": model.type,
                  "Context Length": model.context_length,
                  "License": model.license,
-                 "Input per 1M token": model.pricing.input,
-                 "Output per 1M token": model.pricing.output,
+                 "Input per 1M token": model.pricing.input if model.pricing else None,
+                 "Output per 1M token": model.pricing.output if model.pricing else None,
              }
          )
 
@@ -98,12 +105,12 @@ def upload(
      ctx: click.Context,
      model_name: str,
      model_source: str,
-     model_type: str,
-     hf_token: str | None,
-     description: str | None,
-     base_model: str | None,
-     lora_model: str | None,
+     hf_token: Optional[str],
+     description: Optional[str],
+     base_model: Optional[str],
+     lora_model: Optional[str],
      json: bool,
+     model_type: Optional[Literal["model", "adapter"]] = "model",
  ) -> None:
      """Upload a custom model or adapter from Hugging Face or S3"""
      client: Together = ctx.obj
@@ -111,23 +118,23 @@ def upload(
      response: ModelUploadResponse = client.models.upload(
          model_name=model_name,
          model_source=model_source,
-         model_type=model_type,
-         hf_token=hf_token,
-         description=description,
-         base_model=base_model,
-         lora_model=lora_model,
+         model_type=model_type or omit,
+         hf_token=hf_token or omit,
+         description=description or omit,
+         base_model=base_model or omit,
+         lora_model=lora_model or omit,
      )
 
      if json:
          click.echo(json_lib.dumps(response.model_dump(), indent=2))
      else:
          click.echo(f"Model upload job created successfully!")
-         if response.job_id:
-             click.echo(f"Job ID: {response.job_id}")
-         if response.model_name:
-             click.echo(f"Model Name: {response.model_name}")
-         if response.model_id:
-             click.echo(f"Model ID: {response.model_id}")
-         if response.model_source:
-             click.echo(f"Model Source: {response.model_source}")
+         if response.data.job_id:
+             click.echo(f"Job ID: {response.data.job_id}")
+         if response.data.x_model_name:
+             click.echo(f"Model Name: {response.data.x_model_name}")
+         if response.data.x_model_id:
+             click.echo(f"Model ID: {response.data.x_model_id}")
+         if response.data.x_model_source:
+             click.echo(f"Model Source: {response.data.x_model_source}")
          click.echo(f"Message: {response.message}")
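
The `or omit` idiom above maps unset CLI options (which arrive as `None`) to the SDK's `omit` sentinel, so the field is dropped from the request body entirely instead of being sent as JSON `null`. A hedged sketch of the pattern (the model name and source are placeholders):

    from together import Together, omit

    client = Together()
    hf_token = None  # e.g. the flag was not passed on the command line
    # `None or omit` evaluates to `omit`, so hf_token never reaches the wire
    client.models.upload(model_name="my-model", model_source="org/repo", hf_token=hf_token or omit)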
@@ -0,0 +1,50 @@
+ from __future__ import annotations
+
+ from typing import Literal
+ from gettext import gettext as _
+ from typing_extensions import override
+
+ import click
+
+
+ class AutoIntParamType(click.ParamType):
+     name = "integer_or_max"
+     _number_class = int
+
+     @override
+     def convert(
+         self, value: str, param: click.Parameter | None, ctx: click.Context | None
+     ) -> int | Literal["max"] | None:
+         if value == "max":
+             return "max"
+         try:
+             return int(value)
+         except ValueError:
+             self.fail(
+                 _("{value!r} is not a valid {number_type}.").format(value=value, number_type=self.name),
+                 param,
+                 ctx,
+             )
+
+
+ class BooleanWithAutoParamType(click.ParamType):
+     name = "boolean_or_auto"
+
+     @override
+     def convert(
+         self, value: str, param: click.Parameter | None, ctx: click.Context | None
+     ) -> bool | Literal["auto"] | None:
+         if value == "auto":
+             return "auto"
+         try:
+             return bool(value)
+         except ValueError:
+             self.fail(
+                 _("{value!r} is not a valid {type}.").format(value=value, type=self.name),
+                 param,
+                 ctx,
+             )
+
+
+ INT_WITH_MAX = AutoIntParamType()
+ BOOL_WITH_AUTO = BooleanWithAutoParamType()
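
These instances back the `--batch-size` option (`max` or an integer) and, given `create`'s `bool | Literal["auto"] | None` signature, presumably the `--train-on-inputs` option (`auto` or a boolean) in the fine-tuning CLI above. A standalone sketch of how they plug into click:

    import click

    @click.command()
    @click.option("--batch-size", type=INT_WITH_MAX, default="max")
    @click.option("--train-on-inputs", type=BOOL_WITH_AUTO, default="auto")
    def train(batch_size, train_on_inputs):
        # batch_size is "max" or an int; train_on_inputs is "auto" or a bool
        click.echo(f"{batch_size=} {train_on_inputs=}")

Keeping the sentinels as string literals ("max", "auto") lets a single option accept both a keyword and a typed value without a custom callback.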