together 1.2.12__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
together/cli/api/finetune.py CHANGED
@@ -1,15 +1,29 @@
  from __future__ import annotations
  
  import json
+ from datetime import datetime
  from textwrap import wrap
+ from typing import Any, Literal
  
  import click
  from click.core import ParameterSource  # type: ignore[attr-defined]
+ from rich import print as rprint
  from tabulate import tabulate
  
  from together import Together
+ from together.cli.api.utils import INT_WITH_MAX
  from together.utils import finetune_price_to_dollars, log_warn, parse_timestamp
- from together.types.finetune import DownloadCheckpointType
+ from together.types.finetune import DownloadCheckpointType, FinetuneTrainingLimits
+ 
+ 
+ _CONFIRMATION_MESSAGE = (
+     "You are about to create a fine-tuning job. "
+     "The cost of your job will be determined by the model size, the number of tokens "
+     "in the training file, the number of tokens in the validation file, the number of epochs, and "
+     "the number of evaluations. Visit https://www.together.ai/pricing to get a price estimate.\n"
+     "You can pass `-y` or `--confirm` to your command to skip this message.\n\n"
+     "Do you want to proceed?"
+ )
  
  
  class DownloadCheckpointTypeChoice(click.Choice):
@@ -44,7 +58,7 @@ def fine_tuning(ctx: click.Context) -> None:
  @click.option(
      "--n-checkpoints", type=int, default=1, help="Number of checkpoints to save"
  )
- @click.option("--batch-size", type=int, default=16, help="Train batch size")
+ @click.option("--batch-size", type=INT_WITH_MAX, default="max", help="Train batch size")
  @click.option("--learning-rate", type=float, default=1e-5, help="Learning rate")
  @click.option(
      "--lora/--no-lora",
@@ -65,6 +79,14 @@ def fine_tuning(ctx: click.Context) -> None:
      "--suffix", type=str, default=None, help="Suffix for the fine-tuned model name"
  )
  @click.option("--wandb-api-key", type=str, default=None, help="Wandb API key")
+ @click.option(
+     "--confirm",
+     "-y",
+     type=bool,
+     is_flag=True,
+     default=False,
+     help="Whether to skip the launch confirmation message",
+ )
  def create(
      ctx: click.Context,
      training_file: str,
@@ -73,7 +95,7 @@ def create(
      n_epochs: int,
      n_evals: int,
      n_checkpoints: int,
-     batch_size: int,
+     batch_size: int | Literal["max"],
      learning_rate: float,
      lora: bool,
      lora_r: int,
@@ -82,34 +104,12 @@
      lora_trainable_modules: str,
      suffix: str,
      wandb_api_key: str,
+     confirm: bool,
  ) -> None:
      """Start fine-tuning"""
      client: Together = ctx.obj
  
-     if lora:
-         learning_rate_source = click.get_current_context().get_parameter_source(  # type: ignore[attr-defined]
-             "learning_rate"
-         )
-         if learning_rate_source == ParameterSource.DEFAULT:
-             learning_rate = 1e-3
-     else:
-         for param in ["lora_r", "lora_dropout", "lora_alpha", "lora_trainable_modules"]:
-             param_source = click.get_current_context().get_parameter_source(param)  # type: ignore[attr-defined]
-             if param_source != ParameterSource.DEFAULT:
-                 raise click.BadParameter(
-                     f"You set LoRA parameter `{param}` for a full fine-tuning job. "
-                     f"Please change the job type with --lora or remove `{param}` from the arguments"
-                 )
-     if n_evals <= 0 and validation_file:
-         log_warn(
-             "Warning: You have specified a validation file but the number of evaluation loops is set to 0. No evaluations will be performed."
-         )
-     elif n_evals > 0 and not validation_file:
-         raise click.BadParameter(
-             "You have specified a number of evaluation loops but no validation file."
-         )
- 
-     response = client.fine_tuning.create(
+     training_args: dict[str, Any] = dict(
          training_file=training_file,
          model=model,
          n_epochs=n_epochs,
@@ -127,13 +127,87 @@ def create(
          wandb_api_key=wandb_api_key,
      )
  
-     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4))
- 
-     # TODO: Remove it after the 21st of August
-     log_warn(
-         "The default value of batch size has been changed from 32 to 16 since together version >= 1.2.6"
+     model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits(
+         model=model
      )
  
+     if lora:
+         if model_limits.lora_training is None:
+             raise click.BadParameter(
+                 f"LoRA fine-tuning is not supported for the model `{model}`"
+             )
+ 
+         default_values = {
+             "lora_r": model_limits.lora_training.max_rank,
+             "batch_size": model_limits.lora_training.max_batch_size,
+             "learning_rate": 1e-3,
+         }
+         for arg in default_values:
+             arg_source = ctx.get_parameter_source("arg")  # type: ignore[attr-defined]
+             if arg_source == ParameterSource.DEFAULT:
+                 training_args[arg] = default_values[arg_source]
+ 
+         if ctx.get_parameter_source("lora_alpha") == ParameterSource.DEFAULT:  # type: ignore[attr-defined]
+             training_args["lora_alpha"] = training_args["lora_r"] * 2
+     else:
+         if model_limits.full_training is None:
+             raise click.BadParameter(
+                 f"Full fine-tuning is not supported for the model `{model}`"
+             )
+ 
+         for param in ["lora_r", "lora_dropout", "lora_alpha", "lora_trainable_modules"]:
+             param_source = ctx.get_parameter_source(param)  # type: ignore[attr-defined]
+             if param_source != ParameterSource.DEFAULT:
+                 raise click.BadParameter(
+                     f"You set LoRA parameter `{param}` for a full fine-tuning job. "
+                     f"Please change the job type with --lora or remove `{param}` from the arguments"
+                 )
+ 
+         batch_size_source = ctx.get_parameter_source("batch_size")  # type: ignore[attr-defined]
+         if batch_size_source == ParameterSource.DEFAULT:
+             training_args["batch_size"] = model_limits.full_training.max_batch_size
+ 
+     if n_evals <= 0 and validation_file:
+         log_warn(
+             "Warning: You have specified a validation file but the number of evaluation loops is set to 0. No evaluations will be performed."
+         )
+     elif n_evals > 0 and not validation_file:
+         raise click.BadParameter(
+             "You have specified a number of evaluation loops but no validation file."
+         )
+ 
+     if confirm or click.confirm(_CONFIRMATION_MESSAGE, default=True, show_default=True):
+         response = client.fine_tuning.create(
+             training_file=training_file,
+             model=model,
+             n_epochs=n_epochs,
+             validation_file=validation_file,
+             n_evals=n_evals,
+             n_checkpoints=n_checkpoints,
+             batch_size=batch_size,
+             learning_rate=learning_rate,
+             lora=lora,
+             lora_r=lora_r,
+             lora_dropout=lora_dropout,
+             lora_alpha=lora_alpha,
+             lora_trainable_modules=lora_trainable_modules,
+             suffix=suffix,
+             wandb_api_key=wandb_api_key,
+             verbose=True,
+         )
+ 
+         report_string = f"Successfully submitted a fine-tuning job {response.id}"
+         if response.created_at is not None:
+             created_time = datetime.strptime(
+                 response.created_at, "%Y-%m-%dT%H:%M:%S.%f%z"
+             )
+             # created_at reports UTC time, we use .astimezone() to convert to local time
+             formatted_time = created_time.astimezone().strftime("%m/%d/%Y, %H:%M:%S")
+             report_string += f" at {formatted_time}"
+         rprint(report_string)
+     else:
+         click.echo("No confirmation received, stopping job launch")
+ 
  
  @fine_tuning.command()
  @click.pass_context
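
The rewritten `create` command above relies on click's ParameterSource to tell whether a flag was supplied explicitly or left at its default before overriding it with a model-limit value. A minimal, self-contained sketch of that pattern (the command, option, and messages below are hypothetical and not part of the package):

    import click
    from click.core import ParameterSource


    @click.command()
    @click.option("--batch-size", default="max", help="Train batch size")
    @click.pass_context
    def demo(ctx: click.Context, batch_size: str) -> None:
        # ParameterSource.DEFAULT means --batch-size was not passed on the command line,
        # which is the condition the create command checks before substituting model limits.
        if ctx.get_parameter_source("batch_size") == ParameterSource.DEFAULT:
            click.echo("batch size left at its default; the CLI would resolve it from model limits")
        else:
            click.echo(f"user explicitly requested batch size {batch_size}")


    if __name__ == "__main__":
        demo()
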
together/cli/api/utils.py ADDED
@@ -0,0 +1,21 @@
+ import click
+ 
+ from typing import Literal
+ 
+ 
+ class AutoIntParamType(click.ParamType):
+     name = "integer"
+ 
+     def convert(
+         self, value: str, param: click.Parameter | None, ctx: click.Context | None
+     ) -> int | Literal["max"] | None:
+         if isinstance(value, int):
+             return value
+ 
+         if value == "max":
+             return "max"
+ 
+         self.fail("Invalid integer value: {value}")
+ 
+ 
+ INT_WITH_MAX = AutoIntParamType()
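
This new parameter type, used by the `--batch-size` option earlier in this diff, lets one option carry either an integer or the sentinel string "max". A brief illustrative check of the converter (hypothetical usage, not package code):

    from together.cli.api.utils import INT_WITH_MAX

    # Integers pass through unchanged; the literal string "max" is kept as a sentinel.
    print(INT_WITH_MAX.convert(8, param=None, ctx=None))      # -> 8
    print(INT_WITH_MAX.convert("max", param=None, ctx=None))  # -> 'max'

    # In a click command it attaches like any other param type, e.g.:
    # @click.option("--batch-size", type=INT_WITH_MAX, default="max")
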
together/constants.py CHANGED
@@ -19,7 +19,7 @@ Please set it as an environment variable or set it as together.api_key
  Find your TOGETHER_API_KEY at https://api.together.xyz/settings/api-keys"""
  
  # Minimum number of samples required for fine-tuning file
- MIN_SAMPLES = 100
+ MIN_SAMPLES = 1
  
  # the number of bytes in a gigabyte, used to convert bytes to GB for readable comparison
  NUM_BYTES_IN_GB = 2**30
together/legacy/finetune.py CHANGED
@@ -1,7 +1,7 @@
  from __future__ import annotations
  
  import warnings
- from typing import Any, Dict, List
+ from typing import Any, Dict, List, Literal
  
  import together
  from together.legacy.base import API_KEY_WARNING, deprecated
@@ -43,7 +43,7 @@ class Finetune:
              model=model,
              n_epochs=n_epochs,
              n_checkpoints=n_checkpoints,
-             batch_size=batch_size,
+             batch_size=batch_size if isinstance(batch_size, int) else "max",
              learning_rate=learning_rate,
              suffix=suffix,
              wandb_api_key=wandb_api_key,
together/resources/chat/completions.py CHANGED
@@ -20,7 +20,7 @@ class ChatCompletions:
      def create(
          self,
          *,
-         messages: List[Dict[str, str]],
+         messages: List[Dict[str, Any]],
          model: str,
          max_tokens: int | None = None,
          stop: List[str] | None = None,
@@ -39,7 +39,7 @@ class ChatCompletions:
          n: int | None = None,
          safety_model: str | None = None,
          response_format: Dict[str, str | Dict[str, Any]] | None = None,
-         tools: Dict[str, str | Dict[str, Any]] | None = None,
+         tools: List[Dict[str, Any]] | None = None,
          tool_choice: str | Dict[str, str | Dict[str, str]] | None = None,
          **kwargs: Any,
      ) -> ChatCompletionResponse | Iterator[ChatCompletionChunk]:
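
The loosened signature types `messages` and `tools` as lists of plain dicts. A sketch of a tools argument in that shape; the function-tool schema below is an assumption (it follows the common OpenAI-style layout), so consult the Together documentation for the canonical format:

    from typing import Any, Dict, List

    tools: List[Dict[str, Any]] = [
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",  # hypothetical tool
                "description": "Look up the current weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ]

    # Passed alongside messages, e.g.:
    # client.chat.completions.create(model=..., messages=messages, tools=tools)
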
together/resources/finetune.py CHANGED
@@ -1,6 +1,9 @@
  from __future__ import annotations
  
  from pathlib import Path
+ from typing import Literal
+ 
+ from rich import print as rprint
  
  from together.abstract import api_requestor
  from together.filemanager import DownloadManager
@@ -11,6 +14,7 @@ from together.types import (
      FinetuneListEvents,
      FinetuneRequest,
      FinetuneResponse,
+     FinetuneTrainingLimits,
      FullTrainingType,
      LoRATrainingType,
      TogetherClient,
@@ -18,7 +22,7 @@ from together.types import (
      TrainingType,
  )
  from together.types.finetune import DownloadCheckpointType
- from together.utils import log_warn, normalize_key
+ from together.utils import log_warn_once, normalize_key
  
  
  class FineTuning:
@@ -34,15 +38,17 @@ class FineTuning:
          validation_file: str | None = "",
          n_evals: int | None = 0,
          n_checkpoints: int | None = 1,
-         batch_size: int | None = 16,
+         batch_size: int | Literal["max"] = "max",
          learning_rate: float | None = 0.00001,
          lora: bool = False,
-         lora_r: int | None = 8,
+         lora_r: int | None = None,
          lora_dropout: float | None = 0,
-         lora_alpha: float | None = 8,
+         lora_alpha: float | None = None,
          lora_trainable_modules: str | None = "all-linear",
          suffix: str | None = None,
          wandb_api_key: str | None = None,
+         verbose: bool = False,
+         model_limits: FinetuneTrainingLimits | None = None,
      ) -> FinetuneResponse:
          """
          Method to initiate a fine-tuning job
@@ -55,7 +61,7 @@
              n_evals (int, optional): Number of evaluation loops to run. Defaults to 0.
              n_checkpoints (int, optional): Number of checkpoints to save during fine-tuning.
                  Defaults to 1.
-             batch_size (int, optional): Batch size for fine-tuning. Defaults to 32.
+             batch_size (int, optional): Batch size for fine-tuning. Defaults to max.
              learning_rate (float, optional): Learning rate multiplier to use for training
                  Defaults to 0.00001.
              lora (bool, optional): Whether to use LoRA adapters. Defaults to True.
@@ -67,17 +73,38 @@
                  Defaults to None.
              wandb_api_key (str, optional): API key for Weights & Biases integration.
                  Defaults to None.
+             verbose (bool, optional): whether to print the job parameters before submitting a request.
+                 Defaults to False.
+             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
+                 Defaults to None.
  
          Returns:
              FinetuneResponse: Object containing information about fine-tuning job.
          """
  
+         if batch_size == "max":
+             log_warn_once(
+                 "Starting from together>=1.3.0, "
+                 "the default batch size is set to the maximum allowed value for each model."
+             )
+ 
          requestor = api_requestor.APIRequestor(
              client=self._client,
          )
  
+         if model_limits is None:
+             model_limits = self.get_model_limits(model=model)
+ 
          training_type: TrainingType = FullTrainingType()
          if lora:
+             if model_limits.lora_training is None:
+                 raise ValueError(
+                     "LoRA adapters are not supported for the selected model."
+                 )
+             lora_r = (
+                 lora_r if lora_r is not None else model_limits.lora_training.max_rank
+             )
+             lora_alpha = lora_alpha if lora_alpha is not None else lora_r * 2
              training_type = LoRATrainingType(
                  lora_r=lora_r,
                  lora_alpha=lora_alpha,
@@ -85,7 +112,23 @@
                  lora_trainable_modules=lora_trainable_modules,
              )
  
-         parameter_payload = FinetuneRequest(
+             batch_size = (
+                 batch_size
+                 if batch_size != "max"
+                 else model_limits.lora_training.max_batch_size
+             )
+         else:
+             if model_limits.full_training is None:
+                 raise ValueError(
+                     "Full training is not supported for the selected model."
+                 )
+             batch_size = (
+                 batch_size
+                 if batch_size != "max"
+                 else model_limits.full_training.max_batch_size
+             )
+ 
+         finetune_request = FinetuneRequest(
              model=model,
              training_file=training_file,
              validation_file=validation_file,
@@ -97,7 +140,13 @@
              training_type=training_type,
              suffix=suffix,
              wandb_key=wandb_api_key,
-         ).model_dump(exclude_none=True)
+         )
+         if verbose:
+             rprint(
+                 "Submitting a fine-tuning job with the following parameters:",
+                 finetune_request,
+             )
+         parameter_payload = finetune_request.model_dump(exclude_none=True)
  
          response, _, _ = requestor.request(
              options=TogetherRequest(
@@ -110,17 +159,6 @@
  
          assert isinstance(response, TogetherResponse)
  
-         # TODO: Remove it after the 21st of August
-         log_warn(
-             "The default value of batch size has been changed from 32 to 16 since together version >= 1.2.6"
-         )
- 
-         # TODO: Remove after next LoRA default change
-         log_warn(
-             "Some of the jobs run _directly_ from the together-python library might be trained using LoRA adapters. "
-             "The version range when this change occurred is from 1.2.3 to 1.2.6."
-         )
- 
          return FinetuneResponse(**response.data)
  
      def list(self) -> FinetuneList:
@@ -266,7 +304,7 @@
                  raise ValueError(
                      "Only DEFAULT checkpoint type is allowed for FullTrainingType"
                  )
-             url += f"&checkpoint=modelOutputPath"
+             url += "&checkpoint=modelOutputPath"
          elif isinstance(ft_job.training_type, LoRATrainingType):
              if checkpoint_type == DownloadCheckpointType.DEFAULT:
                  checkpoint_type = DownloadCheckpointType.MERGED
@@ -299,6 +337,34 @@
              size=file_size,
          )
  
+     def get_model_limits(self, *, model: str) -> FinetuneTrainingLimits:
+         """
+         Requests training limits for a specific model
+ 
+         Args:
+             model_name (str): Name of the model to get limits for
+ 
+         Returns:
+             FinetuneTrainingLimits: Object containing training limits for the model
+         """
+ 
+         requestor = api_requestor.APIRequestor(
+             client=self._client,
+         )
+ 
+         model_limits_response, _, _ = requestor.request(
+             options=TogetherRequest(
+                 method="GET",
+                 url="fine-tunes/models/limits",
+                 params={"model_name": model},
+             ),
+             stream=False,
+         )
+ 
+         model_limits = FinetuneTrainingLimits(**model_limits_response.data)
+ 
+         return model_limits
+ 
  
  class AsyncFineTuning:
      def __init__(self, client: TogetherClient) -> None:
@@ -487,3 +553,31 @@
              "AsyncFineTuning.download not implemented. "
              "Please use FineTuning.download function instead."
          )
+ 
+     async def get_model_limits(self, *, model: str) -> FinetuneTrainingLimits:
+         """
+         Requests training limits for a specific model
+ 
+         Args:
+             model_name (str): Name of the model to get limits for
+ 
+         Returns:
+             FinetuneTrainingLimits: Object containing training limits for the model
+         """
+ 
+         requestor = api_requestor.APIRequestor(
+             client=self._client,
+         )
+ 
+         model_limits_response, _, _ = await requestor.arequest(
+             options=TogetherRequest(
+                 method="GET",
+                 url="fine-tunes/models/limits",
+                 params={"model": model},
+             ),
+             stream=False,
+         )
+ 
+         model_limits = FinetuneTrainingLimits(**model_limits_response.data)
+ 
+         return model_limits
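
Taken together, the new limits endpoint and the "max" sentinel let callers inspect a model's training limits and leave batch-size resolution to the client. A usage sketch, reusing the model name and file ID from the README example further down (both are placeholders here, and a valid TOGETHER_API_KEY is assumed):

    from together import Together

    client = Together()

    limits = client.fine_tuning.get_model_limits(model="mistralai/Mixtral-8x7B-Instruct-v0.1")
    print(limits.max_num_epochs, limits.min_learning_rate, limits.max_learning_rate)
    if limits.lora_training is not None:
        print("max LoRA rank:", limits.lora_training.max_rank)
        print("max LoRA batch size:", limits.lora_training.max_batch_size)

    # batch_size="max" (now the default) is resolved against these limits before the
    # request is sent. Whether LoRA is available depends on the model.
    job = client.fine_tuning.create(
        training_file="file-d0d318cb-b7d9-493a-bd70-1cfe089d3815",  # placeholder file ID
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        n_epochs=1,
        batch_size="max",
        lora=True,
        verbose=True,
    )
    print(job.id)
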
together/types/__init__.py CHANGED
@@ -29,6 +29,7 @@ from together.types.finetune import (
      FullTrainingType,
      LoRATrainingType,
      TrainingType,
+     FinetuneTrainingLimits,
  )
  from together.types.images import (
      ImageRequest,
@@ -71,4 +72,5 @@ __all__ = [
      "LoRATrainingType",
      "RerankRequest",
      "RerankResponse",
+     "FinetuneTrainingLimits",
  ]
together/types/chat_completions.py CHANGED
@@ -40,9 +40,24 @@ class ToolCalls(BaseModel):
      function: FunctionCall | None = None
  
  
+ class ChatCompletionMessageContentType(str, Enum):
+     TEXT = "text"
+     IMAGE_URL = "image_url"
+ 
+ 
+ class ChatCompletionMessageContentImageURL(BaseModel):
+     url: str
+ 
+ 
+ class ChatCompletionMessageContent(BaseModel):
+     type: ChatCompletionMessageContentType
+     text: str | None = None
+     image_url: ChatCompletionMessageContentImageURL | None = None
+ 
+ 
  class ChatCompletionMessage(BaseModel):
      role: MessageRole
-     content: str | None = None
+     content: str | List[ChatCompletionMessageContent] | None = None
      tool_calls: List[ToolCalls] | None = None
  
  
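
With `content` now accepting a list of typed parts, a message can mix text with an image reference. An illustrative message in that shape (the image URL is a placeholder, and whether image parts are honored depends on the model being called):

    from together.types.chat_completions import ChatCompletionMessage

    multimodal_message = {
        "role": "user",
        "content": [
            {"type": "text", "text": "What is shown in this image?"},
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
        ],
    }

    # The new models validate this structure; chat.completions.create also accepts such
    # dicts now that messages is typed as List[Dict[str, Any]].
    parsed = ChatCompletionMessage(**multimodal_message)
    print(parsed.content[1].image_url.url)
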
together/types/finetune.py CHANGED
@@ -263,3 +263,21 @@ class FinetuneDownloadResult(BaseModel):
      filename: str | None = None
      # size in bytes
      size: int | None = None
+ 
+ 
+ class FinetuneFullTrainingLimits(BaseModel):
+     max_batch_size: int
+     min_batch_size: int
+ 
+ 
+ class FinetuneLoraTrainingLimits(FinetuneFullTrainingLimits):
+     max_rank: int
+     target_modules: List[str]
+ 
+ 
+ class FinetuneTrainingLimits(BaseModel):
+     max_num_epochs: int
+     max_learning_rate: float
+     min_learning_rate: float
+     full_training: FinetuneFullTrainingLimits | None = None
+     lora_training: FinetuneLoraTrainingLimits | None = None
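
For reference, how a limits payload maps onto these models; the numbers below are invented for illustration and are not real limits for any model:

    from together.types.finetune import (
        FinetuneFullTrainingLimits,
        FinetuneLoraTrainingLimits,
        FinetuneTrainingLimits,
    )

    limits = FinetuneTrainingLimits(
        max_num_epochs=20,  # illustrative values only
        max_learning_rate=1e-3,
        min_learning_rate=1e-6,
        full_training=FinetuneFullTrainingLimits(max_batch_size=96, min_batch_size=8),
        lora_training=FinetuneLoraTrainingLimits(
            max_batch_size=128,
            min_batch_size=8,
            max_rank=64,
            target_modules=["q_proj", "v_proj"],
        ),
    )
    print(limits.lora_training.max_rank)  # 64
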
together/utils/__init__.py CHANGED
@@ -1,4 +1,4 @@
- from together.utils._log import log_debug, log_info, log_warn, logfmt
+ from together.utils._log import log_debug, log_info, log_warn, log_warn_once, logfmt
  from together.utils.api_helpers import default_api_key, get_headers
  from together.utils.files import check_file
  from together.utils.tools import (
@@ -18,6 +18,7 @@ __all__ = [
      "log_debug",
      "log_info",
      "log_warn",
+     "log_warn_once",
      "logfmt",
      "enforce_trailing_slash",
      "normalize_key",
together/utils/_log.py CHANGED
@@ -13,6 +13,8 @@ logger = logging.getLogger("together")
  
  TOGETHER_LOG = os.environ.get("TOGETHER_LOG")
  
+ WARNING_MESSAGES_ONCE = set()
+ 
  
  def _console_log_level() -> str | None:
      if together.log in ["debug", "info"]:
@@ -59,3 +61,11 @@ def log_warn(message: str | Any, **params: Any) -> None:
      msg = logfmt(dict(message=message, **params))
      print(msg, file=sys.stderr)
      logger.warn(msg)
+ 
+ 
+ def log_warn_once(message: str | Any, **params: Any) -> None:
+     msg = logfmt(dict(message=message, **params))
+     if msg not in WARNING_MESSAGES_ONCE:
+         print(msg, file=sys.stderr)
+         logger.warn(msg)
+         WARNING_MESSAGES_ONCE.add(msg)
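
A minimal demonstration of the de-duplication this adds (the message text is arbitrary):

    from together.utils import log_warn_once

    for _ in range(3):
        log_warn_once("the default batch size is now the model's maximum")
    # The message is printed to stderr (and logged) only on the first call;
    # repeats are suppressed via the module-level WARNING_MESSAGES_ONCE set.
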
together-1.3.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: together
- Version: 1.2.12
+ Version: 1.3.0
  Summary: Python client for Together's Cloud Platform!
  Home-page: https://github.com/togethercomputer/together-python
  License: Apache-2.0
@@ -25,6 +25,7 @@ Requires-Dist: pillow (>=10.3.0,<11.0.0)
  Requires-Dist: pyarrow (>=10.0.1)
  Requires-Dist: pydantic (>=2.6.3,<3.0.0)
  Requires-Dist: requests (>=2.31.0,<3.0.0)
+ Requires-Dist: rich (>=13.8.1,<14.0.0)
  Requires-Dist: tabulate (>=0.9.0,<0.10.0)
  Requires-Dist: tqdm (>=4.66.2,<5.0.0)
  Requires-Dist: typer (>=0.9,<0.13)
@@ -276,7 +277,7 @@ client.fine_tuning.create(
    model = 'mistralai/Mixtral-8x7B-Instruct-v0.1',
    n_epochs = 3,
    n_checkpoints = 1,
-   batch_size = 4,
+   batch_size = "max",
    learning_rate = 1e-5,
    suffix = 'my-demo-finetune',
    wandb_api_key = '1a2b3c4d5e.......',
together-1.3.0.dist-info/RECORD CHANGED
@@ -6,12 +6,13 @@ together/cli/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
  together/cli/api/chat.py,sha256=2PHRb-9T-lUEKhUJFtc7SxJv3shCVx40gq_8pzfsewM,9234
  together/cli/api/completions.py,sha256=l-Zw5t7hojL3w8xd_mitS2NRB72i5Z0xwkzH0rT5XMc,4263
  together/cli/api/files.py,sha256=QLYEXRkY8J2Gg1SbTCtzGfoTMvosoeACNK83L_oLubs,3397
- together/cli/api/finetune.py,sha256=c6-T-5K7sODgQa7ehdqZqyluRaDUzPXCgMeFWm3iIwA,8855
+ together/cli/api/finetune.py,sha256=1oZo2zJGMcUHpnEJuCYsY_w4MAYjUuVG9coCvmZJv74,11801
  together/cli/api/images.py,sha256=01dFYa2sK1HqUwVCD9FlwcjqkYWLoNxFZkzok13EriE,2363
  together/cli/api/models.py,sha256=xWEzu8ZpxM_Pz9KEjRPRVuv_v22RayYZ4QcgiezT5tE,1126
+ together/cli/api/utils.py,sha256=IoMIilvBOE5zdq6OgCU2Qa9p_CbrsXCR7b9q3MWu9S0,446
  together/cli/cli.py,sha256=RC0tgapkSOFjsRPg8p-8dx9D2LDzm8YmVCHUjk_aVyQ,1977
  together/client.py,sha256=mOlIFjjE9eSTb0o_weaKJwm8qvWNKHDiMmp8kQ7y68I,4946
- together/constants.py,sha256=WHe6JA9TliwgErkCnovWPS9w9xXfA3X5PtKJv_y2JxQ,908
+ together/constants.py,sha256=6DAvMTrGYI73gUFRbfBdLfDxksucpKjKsiH07PGtSSM,906
  together/error.py,sha256=emjhTSsLwiZvW0v1EmYemjacCMtcFIKAXWWK_2IdP18,5419
  together/filemanager.py,sha256=QHhBn73oVFdgUpSYXYLmJzHJ9c5wYEMJC0ur6ZgDeYo,11269
  together/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -19,40 +20,40 @@ together/legacy/base.py,sha256=ehrX1SCfRbK5OA83wL1q7-tfF-yuZOUxzjxYfFtdvvQ,727
  together/legacy/complete.py,sha256=NRJX-vjnkg4HrgDo9LS3jFfhwfXpeGxcl24dcrLPK3A,2439
  together/legacy/embeddings.py,sha256=nyTERjyPLTm7Sc987a9FJt1adnW7gIa7xs2CwXLE9EI,635
  together/legacy/files.py,sha256=qmAqMiNTPWb6WvLV5Tsv6kxGRfQ31q7OkHZNFwkw8v0,4082
- together/legacy/finetune.py,sha256=k-lERbZLEZlW1QQ9A9zhhwl5KIPjf_jT0R0LSiLbD2Y,5063
+ together/legacy/finetune.py,sha256=LENaqegeb1PszXDbAhTNPro7T3isz6X_IICIOKH7dKE,5114
  together/legacy/images.py,sha256=bJJRs-6C7-NexPyaeyHiYlHOU51yls5-QAiqtO4xrZU,626
  together/legacy/models.py,sha256=85ZN9Ids_FjdYNDRv5k7sgrtVWPKPHqkDplORtVUGHg,1087
  together/resources/__init__.py,sha256=7BLdBCNUbgi5mz30EFfdkdIYiGfFCkiUbdNzMY1-igY,792
  together/resources/chat/__init__.py,sha256=RsTptdP8MeGjcdIjze896-J27cRvCbUoMft0X2BVlQ8,617
- together/resources/chat/completions.py,sha256=kxQLILVAd-fUrx97oXPQWqXTsjYyCZy3dXf7bijpUEg,14470
+ together/resources/chat/completions.py,sha256=jYiNZsWa8RyEacL0VgxWj1egJ857oU4nxIY8uqGHcaU,14459
  together/resources/completions.py,sha256=5Wa-ZjPCxRcam6CDe7KgGYlTA7yJZMmd5TrRgGCL_ug,11726
  together/resources/embeddings.py,sha256=PTvLb82yjG_-iQOyuhsilp77Fr7gZ0o6WD2KeRnKoxs,2675
  together/resources/files.py,sha256=bnPbaF25e4InBRPvHwXHXT-oSX1Z1sZRsnQW5wq82U4,4990
- together/resources/finetune.py,sha256=t8wOulAyNFKlHjHv_u4-PwdL-rR-klgEvDEfH-efdkQ,15970
+ together/resources/finetune.py,sha256=c0nkOepUUjlwnvl8txWsMedVI4Q3-HcfquPBfP12Qkw,19047
  together/resources/images.py,sha256=LQUjKPaFxWTqOAPnyF1Pp7Rz4NLOYhmoKwshpYiprEM,4923
  together/resources/models.py,sha256=2dtHhXAqTDOOpwSbYLzWcKTC0-m2Szlb7LDYvp7Jr4w,1786
  together/resources/rerank.py,sha256=3Ju_aRSyZ1s_3zCSNZnSnEJErUVmt2xa3M8z1nvejMA,3931
  together/together_response.py,sha256=MhczUCPem93cjX-A1TOAUrRj3sO-o3SLcEcTsZgVzQI,1319
- together/types/__init__.py,sha256=ghMiyyR2UzY-Io9Ck3ocwmS6_XSO9VaYWwbLqPDSZfo,1681
+ together/types/__init__.py,sha256=oHZCMC0H3j1ykf7ZRgxIU0QBA534EMpfKqRaa9SdgOo,1739
  together/types/abstract.py,sha256=1lFQI_3WjsR_t1128AeKW0aTk6EiM6Gh1J3ZuyLLPao,642
- together/types/chat_completions.py,sha256=StPaDxC-F6HOTmvKOYgp1rXP2wB_gQ6QEJxbOjedcWA,4490
+ together/types/chat_completions.py,sha256=d24F3VfT7uVnmaEk7Fn-O7qkGUg_AQQzR7vPwlXVDXw,4882
  together/types/common.py,sha256=4ZeIgqGioqhIC-nNxY90czNPp-kAqboMulw6-1z6ShM,1511
  together/types/completions.py,sha256=o3FR5ixsTUj-a3pmOUzbSQg-hESVhpqrC9UD__VCqr4,2971
  together/types/embeddings.py,sha256=J7grkYYn7xhqeKaBO2T-8XQRtHhkzYzymovtGdIUK5A,751
  together/types/error.py,sha256=OVlCs3cx_2WhZK4JzHT8SQyRIIqKOP1AZQ4y1PydjAE,370
  together/types/files.py,sha256=-rEUfsV6f2vZB9NrFxT4_933ubsDIUNkPB-3OlOFk4A,1954
- together/types/finetune.py,sha256=UqZH98L3vVxZ6vykE5rmZFdpYQrqrkTvotZIiyqAME0,7362
+ together/types/finetune.py,sha256=8oYZPNgMWw6xGT3kRzAF4LlWjSfVti4uDVi6FoVJDM0,7814
  together/types/images.py,sha256=zX4Vt38tFDKU6yGb_hBY_N5eSTn3KPdpP5Ce_qnRHXQ,915
  together/types/models.py,sha256=K9Om3cCFexy7qzRSEXUj7gpCy1CVb1hHx7MGG-hvTLw,1035
  together/types/rerank.py,sha256=qZfuXOn7MZ6ly8hpJ_MZ7OU_Bi1-cgYNSB20Wja8Qkk,1061
- together/utils/__init__.py,sha256=VpjeRTya1m5eEE-Qe1zYTFsNAvuEA-dy7M2eG9Xu4fc,662
- together/utils/_log.py,sha256=yzdOV6iBEsyqF8UVvKhZm-ATtRokm34V-dXjTv3WKdE,1665
+ together/utils/__init__.py,sha256=n1kmLiaExT9YOKT5ye--dC4tW2qcHeicKX0GR86U640,698
+ together/utils/_log.py,sha256=5IYNI-jYzxyIS-pUvhb0vE_Muo3MA7GgBhsu66TKP2w,1951
  together/utils/api_helpers.py,sha256=RSF7SRhbjHzroMOSWAXscflByM1r1ta_1SpxkAT22iE,2407
  together/utils/files.py,sha256=gMLthqfP5hKxVAerHMdy7gLXzdfY6lyOXdpW24Y4X3I,7165
  together/utils/tools.py,sha256=3-lXWP3cBCzOVSZg9tr5zOT1jaVeKAKVWxO2fcXZTh8,1788
  together/version.py,sha256=p03ivHyE0SyWU4jAnRTBi_sOwywVWoZPU4g2gzRgG-Y,126
- together-1.2.12.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- together-1.2.12.dist-info/METADATA,sha256=FOar9eWf8dWhdiZ3x_ALmz14KJT5TqTkrIcHzVk_9X8,11813
- together-1.2.12.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- together-1.2.12.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
- together-1.2.12.dist-info/RECORD,,
+ together-1.3.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ together-1.3.0.dist-info/METADATA,sha256=cPKfs6LBr2fFiaA3Pn0LeZASHpZhIiHh_AyI6aQBYxQ,11855
+ together-1.3.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ together-1.3.0.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
+ together-1.3.0.dist-info/RECORD,,