together-1.5.7-py3-none-any.whl → together-1.5.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -82,7 +82,7 @@ def endpoints(ctx: click.Context) -> None:
 @click.option(
     "--model",
     required=True,
-    help="The model to deploy (e.g. mistralai/Mixtral-8x7B-Instruct-v0.1)",
+    help="The model to deploy (e.g. meta-llama/Llama-4-Scout-17B-16E-Instruct)",
 )
 @click.option(
     "--min-replicas",
@@ -1,10 +1,10 @@
 from __future__ import annotations
 
 import json
+import re
 from datetime import datetime, timezone
 from textwrap import wrap
 from typing import Any, Literal
-import re
 
 import click
 from click.core import ParameterSource  # type: ignore[attr-defined]
@@ -13,17 +13,17 @@ from tabulate import tabulate
 
 from together import Together
 from together.cli.api.utils import BOOL_WITH_AUTO, INT_WITH_MAX
+from together.types.finetune import (
+    DownloadCheckpointType,
+    FinetuneEventType,
+    FinetuneTrainingLimits,
+)
 from together.utils import (
     finetune_price_to_dollars,
+    format_timestamp,
     log_warn,
     log_warn_once,
     parse_timestamp,
-    format_timestamp,
-)
-from together.types.finetune import (
-    DownloadCheckpointType,
-    FinetuneTrainingLimits,
-    FinetuneEventType,
 )
 
 
@@ -348,9 +348,9 @@ def list(ctx: click.Context) -> None:
                 "Model Output Name": "\n".join(wrap(i.output_name or "", width=30)),
                 "Status": i.status,
                 "Created At": i.created_at,
-                "Price": f"""${finetune_price_to_dollars(
-                    float(str(i.total_price))
-                )}""",  # convert to string for mypy typing
+                "Price": f"""${
+                    finetune_price_to_dollars(float(str(i.total_price)))
+                }""",  # convert to string for mypy typing
             }
         )
     table = tabulate(display_list, headers="keys", tablefmt="grid", showindex=True)
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import re
 from pathlib import Path
-from typing import List, Literal
+from typing import List, Dict, Literal
 
 from rich import print as rprint
 
@@ -30,16 +30,8 @@ from together.types import (
     TrainingMethodSFT,
     TrainingType,
 )
-from together.types.finetune import (
-    DownloadCheckpointType,
-    FinetuneEvent,
-    FinetuneEventType,
-)
-from together.utils import (
-    get_event_step,
-    log_warn_once,
-    normalize_key,
-)
+from together.types.finetune import DownloadCheckpointType
+from together.utils import log_warn_once, normalize_key
 
 
 _FT_JOB_WITH_STEP_REGEX = r"^ft-[\dabcdef-]+:\d+$"
@@ -77,7 +69,7 @@ def create_finetune_request(
     wandb_base_url: str | None = None,
     wandb_project_name: str | None = None,
     wandb_name: str | None = None,
-    train_on_inputs: bool | Literal["auto"] = "auto",
+    train_on_inputs: bool | Literal["auto"] | None = None,
     training_method: str = "sft",
     dpo_beta: float | None = None,
     from_checkpoint: str | None = None,
@@ -109,6 +101,11 @@ def create_finetune_request(
             raise ValueError(
                 f"LoRA adapters are not supported for the selected model ({model_or_checkpoint})."
             )
+
+        if lora_dropout is not None:
+            if not 0 <= lora_dropout < 1.0:
+                raise ValueError("LoRA dropout must be in [0, 1) range.")
+
         lora_r = lora_r if lora_r is not None else model_limits.lora_training.max_rank
         lora_alpha = lora_alpha if lora_alpha is not None else lora_r * 2
         training_type = LoRATrainingType(
@@ -174,6 +171,18 @@ def create_finetune_request(
            f"training_method must be one of {', '.join(AVAILABLE_TRAINING_METHODS)}"
        )
 
+    if train_on_inputs is not None and training_method != "sft":
+        raise ValueError("train_on_inputs is only supported for SFT training")
+
+    if train_on_inputs is None and training_method == "sft":
+        log_warn_once(
+            "train_on_inputs is not set for SFT training, it will be set to 'auto'"
+        )
+        train_on_inputs = "auto"
+
+    if dpo_beta is not None and training_method != "dpo":
+        raise ValueError("dpo_beta is only supported for DPO training")
+
     lr_scheduler: FinetuneLRScheduler
     if lr_scheduler_type == "cosine":
         if scheduler_num_cycles <= 0.0:
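To see the effect of the new guards at the call site, here is an illustrative sketch through the public client; the file ID and model name are placeholders, and the assumption that `client.fine_tuning.create` forwards `training_method`, `train_on_inputs`, and `dpo_beta` to `create_finetune_request` follows from the signatures and docstrings changed later in this diff.

```python
from together import Together

client = Together()

# Placeholders for illustration only.
job = client.fine_tuning.create(
    training_file="file-d0d318cb-b7d9-493a-bd70-1cfe089d3815",
    model="meta-llama/Llama-3.2-3B-Instruct",
    training_method="dpo",
    dpo_beta=0.1,            # accepted because training_method is "dpo"
    # train_on_inputs=True,  # would now raise: only supported for SFT training
)

# For SFT jobs, leaving train_on_inputs unset now logs a warning and falls back to "auto".
```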
@@ -191,8 +200,10 @@ def create_finetune_request(
             lr_scheduler_args=LinearLRSchedulerArgs(min_lr_ratio=min_lr_ratio),
         )
 
-    training_method_cls: TrainingMethodSFT | TrainingMethodDPO = TrainingMethodSFT()
-    if training_method == "dpo":
+    training_method_cls: TrainingMethodSFT | TrainingMethodDPO
+    if training_method == "sft":
+        training_method_cls = TrainingMethodSFT(train_on_inputs=train_on_inputs)
+    elif training_method == "dpo":
         training_method_cls = TrainingMethodDPO(dpo_beta=dpo_beta)
 
     finetune_request = FinetuneRequest(
@@ -214,7 +225,6 @@ def create_finetune_request(
         wandb_base_url=wandb_base_url,
         wandb_project_name=wandb_project_name,
         wandb_name=wandb_name,
-        train_on_inputs=train_on_inputs,
         training_method=training_method_cls,
         from_checkpoint=from_checkpoint,
     )
@@ -222,68 +232,38 @@ def create_finetune_request(
     return finetune_request
 
 
-def _process_checkpoints_from_events(
-    events: List[FinetuneEvent], id: str
+def _parse_raw_checkpoints(
+    checkpoints: List[Dict[str, str]], id: str
 ) -> List[FinetuneCheckpoint]:
     """
-    Helper function to process events and create checkpoint list.
+    Helper function to process raw checkpoints and create checkpoint list.
 
     Args:
-        events (List[FinetuneEvent]): List of fine-tune events to process
+        checkpoints (List[Dict[str, str]]): List of raw checkpoints metadata
         id (str): Fine-tune job ID
 
     Returns:
         List[FinetuneCheckpoint]: List of available checkpoints
     """
-    checkpoints: List[FinetuneCheckpoint] = []
-
-    for event in events:
-        event_type = event.type
-
-        if event_type == FinetuneEventType.CHECKPOINT_SAVE:
-            step = get_event_step(event)
-            checkpoint_name = f"{id}:{step}" if step is not None else id
-
-            checkpoints.append(
-                FinetuneCheckpoint(
-                    type=(
-                        f"Intermediate (step {step})"
-                        if step is not None
-                        else "Intermediate"
-                    ),
-                    timestamp=event.created_at,
-                    name=checkpoint_name,
-                )
-            )
-        elif event_type == FinetuneEventType.JOB_COMPLETE:
-            if hasattr(event, "model_path"):
-                checkpoints.append(
-                    FinetuneCheckpoint(
-                        type=(
-                            "Final Merged"
-                            if hasattr(event, "adapter_path")
-                            else "Final"
-                        ),
-                        timestamp=event.created_at,
-                        name=id,
-                    )
-                )
 
-            if hasattr(event, "adapter_path"):
-                checkpoints.append(
-                    FinetuneCheckpoint(
-                        type=(
-                            "Final Adapter" if hasattr(event, "model_path") else "Final"
-                        ),
-                        timestamp=event.created_at,
-                        name=id,
-                    )
-                )
+    parsed_checkpoints = []
+    for checkpoint in checkpoints:
+        step = checkpoint["step"]
+        checkpoint_type = checkpoint["checkpoint_type"]
+        checkpoint_name = (
+            f"{id}:{step}" if "intermediate" in checkpoint_type.lower() else id
+        )
 
-    # Sort by timestamp (newest first)
-    checkpoints.sort(key=lambda x: x.timestamp, reverse=True)
+        parsed_checkpoints.append(
+            FinetuneCheckpoint(
+                type=checkpoint_type,
+                timestamp=checkpoint["created_at"],
+                name=checkpoint_name,
+            )
+        )
 
-    return checkpoints
+    parsed_checkpoints.sort(key=lambda x: x.timestamp, reverse=True)
+    return parsed_checkpoints
 
 
 class FineTuning:
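For orientation, a minimal sketch of the payload shape this helper expects; the field values below are assumptions consistent with the keys read above (`step`, `checkpoint_type`, `created_at`), not documented output of the checkpoints endpoint.

```python
from together.resources.finetune import _parse_raw_checkpoints

# Hypothetical response items; only the three keys used above matter here.
raw_checkpoints = [
    {"step": "100", "checkpoint_type": "Intermediate", "created_at": "2025-05-01T12:00:00Z"},
    {"step": "200", "checkpoint_type": "Final", "created_at": "2025-05-01T13:00:00Z"},
]

parsed = _parse_raw_checkpoints(raw_checkpoints, id="ft-example")
# Intermediate checkpoints are named "<job id>:<step>", final ones keep the job id,
# and the result is sorted newest-first by timestamp.
```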
@@ -319,7 +299,7 @@ class FineTuning:
         wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
-        train_on_inputs: bool | Literal["auto"] = "auto",
+        train_on_inputs: bool | Literal["auto"] | None = None,
         training_method: str = "sft",
         dpo_beta: float | None = None,
         from_checkpoint: str | None = None,
@@ -364,12 +344,12 @@ class FineTuning:
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
                 Defaults to None.
-            train_on_inputs (bool or "auto"): Whether to mask the user messages in conversational data or prompts in instruction data.
+            train_on_inputs (bool or "auto", optional): Whether to mask the user messages in conversational data or prompts in instruction data.
                 "auto" will automatically determine whether to mask the inputs based on the data format.
                 For datasets with the "text" field (general format), inputs will not be masked.
                 For datasets with the "messages" field (conversational format) or "prompt" and "completion" fields
                 (Instruction format), inputs will be masked.
-                Defaults to "auto".
+                Defaults to None, or "auto" if training_method is "sft" (set in create_finetune_request).
             training_method (str, optional): Training method. Defaults to "sft".
                 Supported methods: "sft", "dpo".
             dpo_beta (float, optional): DPO beta parameter. Defaults to None.
@@ -561,8 +541,21 @@ class FineTuning:
         Returns:
             List[FinetuneCheckpoint]: List of available checkpoints
         """
-        events = self.list_events(id).data or []
-        return _process_checkpoints_from_events(events, id)
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=TogetherRequest(
+                method="GET",
+                url=f"fine-tunes/{id}/checkpoints",
+            ),
+            stream=False,
+        )
+        assert isinstance(response, TogetherResponse)
+
+        raw_checkpoints = response.data["data"]
+        return _parse_raw_checkpoints(raw_checkpoints, id)
 
     def download(
         self,
@@ -570,7 +563,7 @@ class FineTuning:
         *,
         output: Path | str | None = None,
         checkpoint_step: int | None = None,
-        checkpoint_type: DownloadCheckpointType = DownloadCheckpointType.DEFAULT,
+        checkpoint_type: DownloadCheckpointType | str = DownloadCheckpointType.DEFAULT,
     ) -> FinetuneDownloadResult:
         """
         Downloads compressed fine-tuned model or checkpoint to local disk.
@@ -583,7 +576,7 @@ class FineTuning:
                 Defaults to None.
             checkpoint_step (int, optional): Specifies step number for checkpoint to download.
                 Defaults to -1 (download the final model)
-            checkpoint_type (CheckpointType, optional): Specifies which checkpoint to download.
+            checkpoint_type (CheckpointType | str, optional): Specifies which checkpoint to download.
                 Defaults to CheckpointType.DEFAULT.
 
         Returns:
@@ -607,6 +600,16 @@ class FineTuning:
 
         ft_job = self.retrieve(id)
 
+        # convert str to DownloadCheckpointType
+        if isinstance(checkpoint_type, str):
+            try:
+                checkpoint_type = DownloadCheckpointType(checkpoint_type.lower())
+            except ValueError:
+                enum_strs = ", ".join(e.value for e in DownloadCheckpointType)
+                raise ValueError(
+                    f"Invalid checkpoint type: {checkpoint_type}. Choose one of {{{enum_strs}}}."
+                )
+
         if isinstance(ft_job.training_type, FullTrainingType):
             if checkpoint_type != DownloadCheckpointType.DEFAULT:
                 raise ValueError(
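A short usage sketch of the widened parameter (the job ID is a placeholder): strings are lowercased and coerced to `DownloadCheckpointType`, so "merged" and "adapter" both work, and anything else raises the `ValueError` shown above.

```python
from together import Together

client = Together()

# Placeholder job ID; "adapter" is converted to DownloadCheckpointType.ADAPTER.
client.fine_tuning.download(
    id="ft-example",
    checkpoint_type="adapter",
)
```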
@@ -617,10 +620,11 @@ class FineTuning:
            if checkpoint_type == DownloadCheckpointType.DEFAULT:
                checkpoint_type = DownloadCheckpointType.MERGED
 
-            if checkpoint_type == DownloadCheckpointType.MERGED:
-                url += f"&checkpoint={DownloadCheckpointType.MERGED.value}"
-            elif checkpoint_type == DownloadCheckpointType.ADAPTER:
-                url += f"&checkpoint={DownloadCheckpointType.ADAPTER.value}"
+            if checkpoint_type in {
+                DownloadCheckpointType.MERGED,
+                DownloadCheckpointType.ADAPTER,
+            }:
+                url += f"&checkpoint={checkpoint_type.value}"
             else:
                 raise ValueError(
                     f"Invalid checkpoint type for LoRATrainingType: {checkpoint_type}"
@@ -707,7 +711,7 @@ class AsyncFineTuning:
         wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
-        train_on_inputs: bool | Literal["auto"] = "auto",
+        train_on_inputs: bool | Literal["auto"] | None = None,
         training_method: str = "sft",
         dpo_beta: float | None = None,
         from_checkpoint: str | None = None,
@@ -757,7 +761,7 @@ class AsyncFineTuning:
                 For datasets with the "text" field (general format), inputs will not be masked.
                 For datasets with the "messages" field (conversational format) or "prompt" and "completion" fields
                 (Instruction format), inputs will be masked.
-                Defaults to "auto".
+                Defaults to None, or "auto" if training_method is "sft" (set in create_finetune_request).
             training_method (str, optional): Training method. Defaults to "sft".
                 Supported methods: "sft", "dpo".
             dpo_beta (float, optional): DPO beta parameter. Defaults to None.
@@ -936,11 +940,9 @@ class AsyncFineTuning:
             ),
             stream=False,
         )
+        assert isinstance(events_response, TogetherResponse)
 
-        # FIXME: API returns "data" field with no object type (should be "list")
-        events_list = FinetuneListEvents(object="list", **events_response.data)
-
-        return events_list
+        return FinetuneListEvents(**events_response.data)
 
     async def list_checkpoints(self, id: str) -> List[FinetuneCheckpoint]:
         """
@@ -950,11 +952,23 @@ class AsyncFineTuning:
             id (str): Unique identifier of the fine-tune job to list checkpoints for
 
         Returns:
-            List[FinetuneCheckpoint]: Object containing list of available checkpoints
+            List[FinetuneCheckpoint]: List of available checkpoints
         """
-        events_list = await self.list_events(id)
-        events = events_list.data or []
-        return _process_checkpoints_from_events(events, id)
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=TogetherRequest(
+                method="GET",
+                url=f"fine-tunes/{id}/checkpoints",
+            ),
+            stream=False,
+        )
+        assert isinstance(response, TogetherResponse)
+
+        raw_checkpoints = response.data["data"]
+        return _parse_raw_checkpoints(raw_checkpoints, id)
 
     async def download(
         self, id: str, *, output: str | None = None, checkpoint_step: int = -1
@@ -98,6 +98,7 @@ class ToolChoice(BaseModel):
 
 class ToolChoiceEnum(str, Enum):
     Auto = "auto"
+    Required = "required"
 
 
 class ChatCompletionRequest(BaseModel):
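A hedged sketch of what the new enum member enables, assuming `tools` and `tool_choice` are forwarded by `client.chat.completions.create` (that pass-through is not shown in this diff); the tool definition itself is illustrative.

```python
from together import Together

client = Together()

response = client.chat.completions.create(
    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
    messages=[{"role": "user", "content": "What's the weather in New York?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # illustrative tool
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                },
            },
        }
    ],
    tool_choice="required",  # new: force a tool call instead of leaving it to "auto"
)
```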
@@ -3,7 +3,7 @@ from __future__ import annotations
 from enum import Enum
 from typing import List, Literal, Any
 
-from pydantic import StrictBool, Field, field_validator
+from pydantic import Field, StrictBool, field_validator
 
 from together.types.abstract import BaseModel
 from together.types.common import (
@@ -149,6 +149,7 @@ class TrainingMethodSFT(TrainingMethod):
     """
 
     method: Literal["sft"] = "sft"
+    train_on_inputs: StrictBool | Literal["auto"] = "auto"
 
 
 class TrainingMethodDPO(TrainingMethod):
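With this change, `train_on_inputs` is carried by the SFT training-method model rather than the top-level request (see the next hunk, which removes it from `FinetuneRequest`). A minimal sketch:

```python
from together.types.finetune import TrainingMethodSFT

sft = TrainingMethodSFT(train_on_inputs=False)
print(sft.method)           # "sft"
print(sft.train_on_inputs)  # False; defaults to "auto" when omitted
```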
@@ -201,8 +202,6 @@ class FinetuneRequest(BaseModel):
     wandb_name: str | None = None
     # training type
     training_type: FullTrainingType | LoRATrainingType | None = None
-    # train on inputs
-    train_on_inputs: StrictBool | Literal["auto"] = "auto"
     # training method
     training_method: TrainingMethodSFT | TrainingMethodDPO = Field(
         default_factory=TrainingMethodSFT
together/utils/files.py CHANGED
@@ -6,7 +6,6 @@ from pathlib import Path
 from traceback import format_exc
 from typing import Any, Dict, List
 
-from pyarrow import ArrowInvalid, parquet
 
 from together.constants import (
     MAX_FILE_SIZE_GB,
@@ -372,6 +371,14 @@ def _check_jsonl(file: Path) -> Dict[str, Any]:
 
 
 def _check_parquet(file: Path) -> Dict[str, Any]:
+    try:
+        # Pyarrow is optional as it's large (~80MB) and isn't compatible with older systems.
+        from pyarrow import ArrowInvalid, parquet
+    except ImportError:
+        raise ImportError(
+            "pyarrow is not installed and is required to use parquet files. Please install it via `pip install together[pyarrow]`"
+        )
+
     report_dict: Dict[str, Any] = {}
 
     try:
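Because the import now happens lazily inside `_check_parquet`, a base install without pyarrow only fails when a parquet file is actually checked. A sketch of that behavior (the file path is a placeholder; `_check_parquet` is the internal helper shown above):

```python
from pathlib import Path

from together.utils.files import _check_parquet

# Without pyarrow installed, this raises ImportError with the
# `pip install together[pyarrow]` hint instead of breaking the module import.
report = _check_parquet(Path("train.parquet"))
```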
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: together
-Version: 1.5.7
+Version: 1.5.11
 Summary: Python client for Together's Cloud Platform!
 License: Apache-2.0
 Author: Together AI
@@ -13,6 +13,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Provides-Extra: pyarrow
 Requires-Dist: aiohttp (>=3.9.3,<4.0.0)
 Requires-Dist: click (>=8.1.7,<9.0.0)
 Requires-Dist: eval-type-backport (>=0.1.3,<0.3.0)
@@ -20,7 +21,7 @@ Requires-Dist: filelock (>=3.13.1,<4.0.0)
 Requires-Dist: numpy (>=1.23.5) ; python_version < "3.12"
 Requires-Dist: numpy (>=1.26.0) ; python_version >= "3.12"
 Requires-Dist: pillow (>=11.1.0,<12.0.0)
-Requires-Dist: pyarrow (>=10.0.1)
+Requires-Dist: pyarrow (>=10.0.1) ; extra == "pyarrow"
 Requires-Dist: pydantic (>=2.6.3,<3.0.0)
 Requires-Dist: requests (>=2.31.0,<3.0.0)
 Requires-Dist: rich (>=13.8.1,<15.0.0)
@@ -92,7 +93,7 @@ client = Together()
 
 # Simple text message
 response = client.chat.completions.create(
-    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
+    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
     messages=[{"role": "user", "content": "tell me about new york"}],
 )
 print(response.choices[0].message.content)
@@ -182,7 +183,7 @@ from together import Together
 
 client = Together()
 stream = client.chat.completions.create(
-    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
+    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
     messages=[{"role": "user", "content": "tell me about new york"}],
     stream=True,
 )
@@ -207,7 +208,7 @@ async def async_chat_completion(messages):
     async_client = AsyncTogether()
     tasks = [
         async_client.chat.completions.create(
-            model="mistralai/Mixtral-8x7B-Instruct-v0.1",
+            model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
            messages=[{"role": "user", "content": message}],
         )
         for message in messages
@@ -230,7 +231,7 @@ from together import Together
 client = Together()
 
 response = client.chat.completions.create(
-    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
+    model="meta-llama/Llama-3.2-3B-Instruct-Turbo",
     messages=[{"role": "user", "content": "tell me about new york"}],
     logprobs=1
 )
@@ -381,7 +382,7 @@ client.files.delete(id="file-d0d318cb-b7d9-493a-bd70-1cfe089d3815") # deletes a
 
 ### Fine-tunes
 
-The finetune API is used for fine-tuning and allows developers to create finetuning jobs. It also has several methods to list all jobs, retrive statuses and get checkpoints. Please refer to our fine-tuning docs [here](https://docs.together.ai/docs/fine-tuning-python).
+The finetune API is used for fine-tuning and allows developers to create finetuning jobs. It also has several methods to list all jobs, retrive statuses and get checkpoints. Please refer to our fine-tuning docs [here](https://docs.together.ai/docs/fine-tuning-quickstart).
 
 ```python
 from together import Together
@@ -390,7 +391,7 @@ client = Together()
 
 client.fine_tuning.create(
     training_file = 'file-d0d318cb-b7d9-493a-bd70-1cfe089d3815',
-    model = 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+    model = 'meta-llama/Llama-3.2-3B-Instruct',
     n_epochs = 3,
     n_checkpoints = 1,
     batch_size = "max",
@@ -428,7 +429,7 @@ for model in models:
 together chat.completions \
   --message "system" "You are a helpful assistant named Together" \
   --message "user" "What is your name?" \
-  --model mistralai/Mixtral-8x7B-Instruct-v0.1
+  --model meta-llama/Llama-4-Scout-17B-16E-Instruct
 ```
 
 The Chat Completions CLI enables streaming tokens to stdout by default. To disable streaming, use `--no-stream`.
@@ -438,7 +439,7 @@ The Chat Completions CLI enables streaming tokens to stdout by default. To disab
 ```bash
 together completions \
   "Large language models are " \
-  --model mistralai/Mixtral-8x7B-v0.1 \
+  --model meta-llama/Llama-4-Scout-17B-16E-Instruct \
   --max-tokens 512 \
   --stop "."
 ```
@@ -5,9 +5,9 @@ together/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 together/cli/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 together/cli/api/chat.py,sha256=2PHRb-9T-lUEKhUJFtc7SxJv3shCVx40gq_8pzfsewM,9234
 together/cli/api/completions.py,sha256=l-Zw5t7hojL3w8xd_mitS2NRB72i5Z0xwkzH0rT5XMc,4263
-together/cli/api/endpoints.py,sha256=tM42PthzkpTjOkmL6twyWYXqEQw0DOqsEKTZdhp6vKU,13284
+together/cli/api/endpoints.py,sha256=f6KafWZvRF6n_ThWdr3y9uhE6wPF37PcD45w_EtgXmY,13289
 together/cli/api/files.py,sha256=QLYEXRkY8J2Gg1SbTCtzGfoTMvosoeACNK83L_oLubs,3397
-together/cli/api/finetune.py,sha256=AnVa5sDB4ZCR_S-CMJStj1vzi0b2AwYQT2c3rkUnAbQ,16238
+together/cli/api/finetune.py,sha256=y_FEWvoD7Mi07U2g-sCbPFN_qsBRf8T365xBEbpM2qc,16238
 together/cli/api/images.py,sha256=GADSeaNUHUVMtWovmccGuKc28IJ9E_v4vAEwYHJhu5o,2645
 together/cli/api/models.py,sha256=CXw8B1hqNkadogi58GIXhLg_dTJnvTBaE7Kq1_xQ-10,1423
 together/cli/api/utils.py,sha256=IuqYWPnLI38_Bqd7lj8V_SnGdYc59pRmMbQmciS4FsM,1326
@@ -34,7 +34,7 @@ together/resources/completions.py,sha256=5Wa-ZjPCxRcam6CDe7KgGYlTA7yJZMmd5TrRgGC
 together/resources/embeddings.py,sha256=PTvLb82yjG_-iQOyuhsilp77Fr7gZ0o6WD2KeRnKoxs,2675
 together/resources/endpoints.py,sha256=NNjp-wyzOotzlscGGrANhOHxQBjHTN8f5kTQTH_CLvE,17177
 together/resources/files.py,sha256=bnPbaF25e4InBRPvHwXHXT-oSX1Z1sZRsnQW5wq82U4,4990
-together/resources/finetune.py,sha256=7rcGfZqre1szzlh1MwseLlghZ9vRHwHAdeITvP2ohJY,36942
+together/resources/finetune.py,sha256=FxIMIb1ZNjhrK7cE8bYhnwuRZSmLy_nK7HzMAp4XrNQ,37617
 together/resources/images.py,sha256=LQUjKPaFxWTqOAPnyF1Pp7Rz4NLOYhmoKwshpYiprEM,4923
 together/resources/models.py,sha256=qgmAXv61Cq4oLxytenEZBywA8shldDHYxJ_EAu_4JWQ,3864
 together/resources/rerank.py,sha256=3Ju_aRSyZ1s_3zCSNZnSnEJErUVmt2xa3M8z1nvejMA,3931
@@ -42,7 +42,7 @@ together/together_response.py,sha256=a3dgKMPDrlfKQwxYENfNt2T4l2vSZxRWMixhHSy-q3E
 together/types/__init__.py,sha256=VgIbE2AOK9c2TQUzkbRbyRkdia2COXJXl_wxPaoxR-M,2688
 together/types/abstract.py,sha256=1lFQI_3WjsR_t1128AeKW0aTk6EiM6Gh1J3ZuyLLPao,642
 together/types/audio_speech.py,sha256=jlj8BZf3dkIDARF1P11fuenVLj4try8Yx4RN-EAkhOU,2609
-together/types/chat_completions.py,sha256=ggwt1LlBXTB_hZKbtLsjg8j-gXxO8pUUQfTrxUmRXHU,5078
+together/types/chat_completions.py,sha256=qpBCMXEWtRwW_fmiu6cecm3d4h6mcK8gvr-8JkbAopQ,5104
 together/types/code_interpreter.py,sha256=cjF8TKgRkJllHS4i24dWQZBGTRsG557eHSewOiip0Kk,1770
 together/types/common.py,sha256=kxZ-N9xtBsGYZBmbIWnZ0rfT3Pn8PFB7sAbp3iv96pw,1525
 together/types/completions.py,sha256=o3FR5ixsTUj-a3pmOUzbSQg-hESVhpqrC9UD__VCqr4,2971
@@ -50,18 +50,18 @@ together/types/embeddings.py,sha256=J7grkYYn7xhqeKaBO2T-8XQRtHhkzYzymovtGdIUK5A,
 together/types/endpoints.py,sha256=EzNhHOoQ_D9fUdNQtxQPeSWiFzdFLqpNodN0YLmv_h0,4393
 together/types/error.py,sha256=OVlCs3cx_2WhZK4JzHT8SQyRIIqKOP1AZQ4y1PydjAE,370
 together/types/files.py,sha256=-rEUfsV6f2vZB9NrFxT4_933ubsDIUNkPB-3OlOFk4A,1954
-together/types/finetune.py,sha256=KVaos9wBjFZFvVoL4DBx3PlXjGAx7_0CTQxKzEZzf40,10940
+together/types/finetune.py,sha256=Utdcm3kL_cDfBS3zjXwyHsuP2qFFjCQiQZOsPD-WlpE,10918
 together/types/images.py,sha256=xnC-FZGdZU30WSFTybfGneWxb-kj0ZGufJsgHtB8j0k,980
 together/types/models.py,sha256=nwQIZGHKZpX9I6mK8z56VW70YC6Ry6JGsVa0s99QVxc,1055
 together/types/rerank.py,sha256=qZfuXOn7MZ6ly8hpJ_MZ7OU_Bi1-cgYNSB20Wja8Qkk,1061
 together/utils/__init__.py,sha256=5fqvj4KT2rHxKSQot2TSyV_HcvkvkGiqAiaYuJwqtm0,786
 together/utils/_log.py,sha256=5IYNI-jYzxyIS-pUvhb0vE_Muo3MA7GgBhsu66TKP2w,1951
 together/utils/api_helpers.py,sha256=2K0O6qeEQ2zVFvi5NBN5m2kjZJaS3-JfKFecQ7SmGaw,3746
-together/utils/files.py,sha256=rfp10qU0urtWOXXFeasFtO9xp-1KIhM3S43JxcnHmL0,16438
+together/utils/files.py,sha256=btWQawwXbNKfPmCtRyObZViG1Xx-IPz45PrAtMXvcy8,16741
 together/utils/tools.py,sha256=H2MTJhEqtBllaDvOyZehIO_IVNK3P17rSDeILtJIVag,2964
 together/version.py,sha256=p03ivHyE0SyWU4jAnRTBi_sOwywVWoZPU4g2gzRgG-Y,126
-together-1.5.7.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-together-1.5.7.dist-info/METADATA,sha256=c8znCsq8LslEJ8Y7Kep9Ld_fIUsL2hpKjgZGv21Yrk4,15415
-together-1.5.7.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
-together-1.5.7.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
-together-1.5.7.dist-info/RECORD,,
+together-1.5.11.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+together-1.5.11.dist-info/METADATA,sha256=JhFDu1VPl18jD4YzuGcq4d6rp1ZNXNAf8k2qkuAjjB0,15497
+together-1.5.11.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+together-1.5.11.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
+together-1.5.11.dist-info/RECORD,,
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 2.1.2
+Generator: poetry-core 2.1.3
 Root-Is-Purelib: true
 Tag: py3-none-any