together 1.2.7__tar.gz → 1.2.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. {together-1.2.7 → together-1.2.9}/PKG-INFO +1 -1
  2. {together-1.2.7 → together-1.2.9}/pyproject.toml +2 -2
  3. {together-1.2.7 → together-1.2.9}/src/together/cli/api/finetune.py +40 -1
  4. {together-1.2.7 → together-1.2.9}/src/together/client.py +4 -0
  5. {together-1.2.7 → together-1.2.9}/src/together/resources/__init__.py +3 -0
  6. {together-1.2.7 → together-1.2.9}/src/together/resources/finetune.py +43 -2
  7. together-1.2.9/src/together/resources/rerank.py +124 -0
  8. {together-1.2.7 → together-1.2.9}/src/together/types/__init__.py +6 -1
  9. {together-1.2.7 → together-1.2.9}/src/together/types/finetune.py +33 -4
  10. {together-1.2.7 → together-1.2.9}/src/together/types/models.py +1 -0
  11. together-1.2.9/src/together/types/rerank.py +43 -0
  12. {together-1.2.7 → together-1.2.9}/LICENSE +0 -0
  13. {together-1.2.7 → together-1.2.9}/README.md +0 -0
  14. {together-1.2.7 → together-1.2.9}/src/together/__init__.py +0 -0
  15. {together-1.2.7 → together-1.2.9}/src/together/abstract/__init__.py +0 -0
  16. {together-1.2.7 → together-1.2.9}/src/together/abstract/api_requestor.py +0 -0
  17. {together-1.2.7 → together-1.2.9}/src/together/cli/__init__.py +0 -0
  18. {together-1.2.7 → together-1.2.9}/src/together/cli/api/__init__.py +0 -0
  19. {together-1.2.7 → together-1.2.9}/src/together/cli/api/chat.py +0 -0
  20. {together-1.2.7 → together-1.2.9}/src/together/cli/api/completions.py +0 -0
  21. {together-1.2.7 → together-1.2.9}/src/together/cli/api/files.py +0 -0
  22. {together-1.2.7 → together-1.2.9}/src/together/cli/api/images.py +0 -0
  23. {together-1.2.7 → together-1.2.9}/src/together/cli/api/models.py +0 -0
  24. {together-1.2.7 → together-1.2.9}/src/together/cli/cli.py +0 -0
  25. {together-1.2.7 → together-1.2.9}/src/together/constants.py +0 -0
  26. {together-1.2.7 → together-1.2.9}/src/together/error.py +0 -0
  27. {together-1.2.7 → together-1.2.9}/src/together/filemanager.py +0 -0
  28. {together-1.2.7 → together-1.2.9}/src/together/legacy/__init__.py +0 -0
  29. {together-1.2.7 → together-1.2.9}/src/together/legacy/base.py +0 -0
  30. {together-1.2.7 → together-1.2.9}/src/together/legacy/complete.py +0 -0
  31. {together-1.2.7 → together-1.2.9}/src/together/legacy/embeddings.py +0 -0
  32. {together-1.2.7 → together-1.2.9}/src/together/legacy/files.py +0 -0
  33. {together-1.2.7 → together-1.2.9}/src/together/legacy/finetune.py +0 -0
  34. {together-1.2.7 → together-1.2.9}/src/together/legacy/images.py +0 -0
  35. {together-1.2.7 → together-1.2.9}/src/together/legacy/models.py +0 -0
  36. {together-1.2.7 → together-1.2.9}/src/together/resources/chat/__init__.py +0 -0
  37. {together-1.2.7 → together-1.2.9}/src/together/resources/chat/completions.py +0 -0
  38. {together-1.2.7 → together-1.2.9}/src/together/resources/completions.py +0 -0
  39. {together-1.2.7 → together-1.2.9}/src/together/resources/embeddings.py +0 -0
  40. {together-1.2.7 → together-1.2.9}/src/together/resources/files.py +0 -0
  41. {together-1.2.7 → together-1.2.9}/src/together/resources/images.py +0 -0
  42. {together-1.2.7 → together-1.2.9}/src/together/resources/models.py +0 -0
  43. {together-1.2.7 → together-1.2.9}/src/together/together_response.py +0 -0
  44. {together-1.2.7 → together-1.2.9}/src/together/types/abstract.py +0 -0
  45. {together-1.2.7 → together-1.2.9}/src/together/types/chat_completions.py +0 -0
  46. {together-1.2.7 → together-1.2.9}/src/together/types/common.py +0 -0
  47. {together-1.2.7 → together-1.2.9}/src/together/types/completions.py +0 -0
  48. {together-1.2.7 → together-1.2.9}/src/together/types/embeddings.py +0 -0
  49. {together-1.2.7 → together-1.2.9}/src/together/types/error.py +0 -0
  50. {together-1.2.7 → together-1.2.9}/src/together/types/files.py +0 -0
  51. {together-1.2.7 → together-1.2.9}/src/together/types/images.py +0 -0
  52. {together-1.2.7 → together-1.2.9}/src/together/utils/__init__.py +0 -0
  53. {together-1.2.7 → together-1.2.9}/src/together/utils/_log.py +0 -0
  54. {together-1.2.7 → together-1.2.9}/src/together/utils/api_helpers.py +0 -0
  55. {together-1.2.7 → together-1.2.9}/src/together/utils/files.py +0 -0
  56. {together-1.2.7 → together-1.2.9}/src/together/utils/tools.py +0 -0
  57. {together-1.2.7 → together-1.2.9}/src/together/version.py +0 -0
--- together-1.2.7/PKG-INFO
+++ together-1.2.9/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: together
-Version: 1.2.7
+Version: 1.2.9
 Summary: Python client for Together's Cloud Platform!
 Home-page: https://github.com/togethercomputer/together-python
 License: Apache-2.0
--- together-1.2.7/pyproject.toml
+++ together-1.2.9/pyproject.toml
@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"
 
 [tool.poetry]
 name = "together"
-version = "1.2.7"
+version = "1.2.9"
 authors = [
     "Together AI <support@together.ai>"
 ]
@@ -50,7 +50,7 @@ optional = true
 
 [tool.poetry.group.quality.dependencies]
 black = ">=23.1,<25.0"
-ruff = ">=0.3.2,<0.6.0"
+ruff = ">=0.3.2,<0.7.0"
 types-tqdm = "^4.65.0.0"
 types-tabulate = "^0.9.0.3"
 pre-commit = "3.5.0"
--- together-1.2.7/src/together/cli/api/finetune.py
+++ together-1.2.9/src/together/cli/api/finetune.py
@@ -7,6 +7,18 @@ from tabulate import tabulate
 
 from together import Together
 from together.utils import finetune_price_to_dollars, log_warn, parse_timestamp
+from together.types.finetune import DownloadCheckpointType
+
+
+class DownloadCheckpointTypeChoice(click.Choice):
+    def __init__(self) -> None:
+        super().__init__([ct.value for ct in DownloadCheckpointType])
+
+    def convert(
+        self, value: str, param: click.Parameter | None, ctx: click.Context | None
+    ) -> DownloadCheckpointType:
+        value = super().convert(value, param, ctx)
+        return DownloadCheckpointType(value)
 
 
 @click.group(name="fine-tuning")
@@ -23,6 +35,10 @@ def fine_tuning(ctx: click.Context) -> None:
 )
 @click.option("--model", type=str, required=True, help="Base model name")
 @click.option("--n-epochs", type=int, default=1, help="Number of epochs to train for")
+@click.option(
+    "--validation-file", type=str, default="", help="Validation file ID from Files API"
+)
+@click.option("--n-evals", type=int, default=0, help="Number of evaluation loops")
 @click.option(
     "--n-checkpoints", type=int, default=1, help="Number of checkpoints to save"
 )
@@ -50,8 +66,10 @@ def fine_tuning(ctx: click.Context) -> None:
 def create(
     ctx: click.Context,
     training_file: str,
+    validation_file: str,
     model: str,
     n_epochs: int,
+    n_evals: int,
     n_checkpoints: int,
     batch_size: int,
     learning_rate: float,
@@ -80,11 +98,21 @@ def create(
                 f"You set LoRA parameter `{param}` for a full fine-tuning job. "
                 f"Please change the job type with --lora or remove `{param}` from the arguments"
             )
+    if n_evals <= 0 and validation_file:
+        log_warn(
+            "Warning: You have specified a validation file but the number of evaluation loops is set to 0. No evaluations will be performed."
+        )
+    elif n_evals > 0 and not validation_file:
+        raise click.BadParameter(
+            "You have specified a number of evaluation loops but no validation file."
+        )
 
     response = client.fine_tuning.create(
         training_file=training_file,
         model=model,
         n_epochs=n_epochs,
+        validation_file=validation_file,
+        n_evals=n_evals,
         n_checkpoints=n_checkpoints,
         batch_size=batch_size,
         learning_rate=learning_rate,
@@ -215,17 +243,28 @@ def list_events(ctx: click.Context, fine_tune_id: str) -> None:
     default=-1,
     help="Download fine-tuning checkpoint. Defaults to latest.",
 )
+@click.option(
+    "--checkpoint-type",
+    type=DownloadCheckpointTypeChoice(),
+    required=False,
+    default=DownloadCheckpointType.DEFAULT.value,
+    help="Specifies checkpoint type. 'merged' and 'adapter' options work only for LoRA jobs.",
+)
 def download(
     ctx: click.Context,
     fine_tune_id: str,
     output_dir: str,
     checkpoint_step: int,
+    checkpoint_type: DownloadCheckpointType,
 ) -> None:
     """Download fine-tuning checkpoint"""
     client: Together = ctx.obj
 
     response = client.fine_tuning.download(
-        fine_tune_id, output=output_dir, checkpoint_step=checkpoint_step
+        fine_tune_id,
+        output=output_dir,
+        checkpoint_step=checkpoint_step,
+        checkpoint_type=checkpoint_type,
     )
 
     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4))
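
The DownloadCheckpointTypeChoice helper added at the top of this file is a small reusable pattern: subclass click.Choice so the option string is validated against an enum's values while the command function receives an enum member instead of a raw string. A minimal, self-contained sketch of the same pattern (the Color enum and paint command are illustrative stand-ins, not part of this package):

# Sketch of the click.Choice-to-Enum pattern used in finetune.py above.
# "Color" and "paint" are illustrative names, not together package code.
from __future__ import annotations

from enum import Enum

import click


class Color(Enum):
    RED = "red"
    BLUE = "blue"


class ColorChoice(click.Choice):
    def __init__(self) -> None:
        # Advertise the enum *values* as the allowed strings in --help output.
        super().__init__([c.value for c in Color])

    def convert(
        self, value: str, param: click.Parameter | None, ctx: click.Context | None
    ) -> Color:
        # click.Choice rejects anything outside the allowed set;
        # the validated string is then mapped back to its enum member.
        value = super().convert(value, param, ctx)
        return Color(value)


@click.command()
@click.option("--color", type=ColorChoice(), default=Color.RED.value)
def paint(color: Color) -> None:
    click.echo(f"painting in {color.name}")


if __name__ == "__main__":
    paint()
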
--- together-1.2.7/src/together/client.py
+++ together-1.2.9/src/together/client.py
@@ -18,6 +18,7 @@ class Together:
    images: resources.Images
    models: resources.Models
    fine_tuning: resources.FineTuning
+   rerank: resources.Rerank

    # client options
    client: TogetherClient
@@ -77,6 +78,7 @@ class Together:
        self.images = resources.Images(self.client)
        self.models = resources.Models(self.client)
        self.fine_tuning = resources.FineTuning(self.client)
+       self.rerank = resources.Rerank(self.client)


class AsyncTogether:
@@ -87,6 +89,7 @@ class AsyncTogether:
    images: resources.AsyncImages
    models: resources.AsyncModels
    fine_tuning: resources.AsyncFineTuning
+   rerank: resources.AsyncRerank

    # client options
    client: TogetherClient
@@ -146,6 +149,7 @@ class AsyncTogether:
        self.images = resources.AsyncImages(self.client)
        self.models = resources.AsyncModels(self.client)
        self.fine_tuning = resources.AsyncFineTuning(self.client)
+       self.rerank = resources.AsyncRerank(self.client)


Client = Together
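
Both client variants wire the new resource identically; a minimal sketch (assumes a TOGETHER_API_KEY in the environment):

# Sketch: the rerank resource is exposed on both client variants.
from together import AsyncTogether, Together

sync_client = Together()        # sync_client.rerank -> resources.Rerank
async_client = AsyncTogether()  # async_client.rerank -> resources.AsyncRerank

# Same create() signature on both; the async variant is awaited:
#   sync_client.rerank.create(...)
#   await async_client.rerank.create(...)
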
--- together-1.2.7/src/together/resources/__init__.py
+++ together-1.2.9/src/together/resources/__init__.py
@@ -5,6 +5,7 @@ from together.resources.files import AsyncFiles, Files
 from together.resources.finetune import AsyncFineTuning, FineTuning
 from together.resources.images import AsyncImages, Images
 from together.resources.models import AsyncModels, Models
+from together.resources.rerank import AsyncRerank, Rerank
 
 
 __all__ = [
@@ -22,4 +23,6 @@ __all__ = [
     "Images",
     "AsyncModels",
     "Models",
+    "AsyncRerank",
+    "Rerank",
 ]
--- together-1.2.7/src/together/resources/finetune.py
+++ together-1.2.9/src/together/resources/finetune.py
@@ -17,6 +17,7 @@ from together.types import (
     TogetherRequest,
     TrainingType,
 )
+from together.types.finetune import DownloadCheckpointType
 from together.utils import log_warn, normalize_key
 
 
@@ -30,6 +31,8 @@ class FineTuning:
         training_file: str,
         model: str,
         n_epochs: int = 1,
+        validation_file: str | None = "",
+        n_evals: int | None = 0,
         n_checkpoints: int | None = 1,
         batch_size: int | None = 16,
         learning_rate: float | None = 0.00001,
@@ -48,6 +51,8 @@ class FineTuning:
             training_file (str): File-ID of a file uploaded to the Together API
             model (str): Name of the base model to run fine-tune job on
             n_epochs (int, optional): Number of epochs for fine-tuning. Defaults to 1.
+            validation_file (str, optional): File-ID of a file uploaded to the Together API for validation.
+            n_evals (int, optional): Number of evaluation loops to run. Defaults to 0.
             n_checkpoints (int, optional): Number of checkpoints to save during fine-tuning.
                 Defaults to 1.
             batch_size (int, optional): Batch size for fine-tuning. Defaults to 32.
@@ -83,7 +88,9 @@ class FineTuning:
         parameter_payload = FinetuneRequest(
             model=model,
             training_file=training_file,
+            validation_file=validation_file,
             n_epochs=n_epochs,
+            n_evals=n_evals,
             n_checkpoints=n_checkpoints,
             batch_size=batch_size,
             learning_rate=learning_rate,
@@ -222,7 +229,12 @@ class FineTuning:
         return FinetuneListEvents(**response.data)
 
     def download(
-        self, id: str, *, output: Path | str | None = None, checkpoint_step: int = -1
+        self,
+        id: str,
+        *,
+        output: Path | str | None = None,
+        checkpoint_step: int = -1,
+        checkpoint_type: DownloadCheckpointType = DownloadCheckpointType.DEFAULT,
     ) -> FinetuneDownloadResult:
         """
         Downloads compressed fine-tuned model or checkpoint to local disk.
@@ -235,6 +247,8 @@ class FineTuning:
                 Defaults to None.
             checkpoint_step (int, optional): Specifies step number for checkpoint to download.
                 Defaults to -1 (download the final model)
+            checkpoint_type (DownloadCheckpointType, optional): Specifies which checkpoint to download.
+                Defaults to DownloadCheckpointType.DEFAULT.
 
         Returns:
             FinetuneDownloadResult: Object containing downloaded model metadata
@@ -245,7 +259,28 @@ class FineTuning:
         if checkpoint_step > 0:
             url += f"&checkpoint_step={checkpoint_step}"
 
-        remote_name = self.retrieve(id).output_name
+        ft_job = self.retrieve(id)
+
+        if isinstance(ft_job.training_type, FullTrainingType):
+            if checkpoint_type != DownloadCheckpointType.DEFAULT:
+                raise ValueError(
+                    "Only DEFAULT checkpoint type is allowed for FullTrainingType"
+                )
+            url += f"&checkpoint=modelOutputPath"
+        elif isinstance(ft_job.training_type, LoRATrainingType):
+            if checkpoint_type == DownloadCheckpointType.DEFAULT:
+                checkpoint_type = DownloadCheckpointType.MERGED
+
+            if checkpoint_type == DownloadCheckpointType.MERGED:
+                url += f"&checkpoint={DownloadCheckpointType.MERGED.value}"
+            elif checkpoint_type == DownloadCheckpointType.ADAPTER:
+                url += f"&checkpoint={DownloadCheckpointType.ADAPTER.value}"
+            else:
+                raise ValueError(
+                    f"Invalid checkpoint type for LoRATrainingType: {checkpoint_type}"
+                )
+
+        remote_name = ft_job.output_name
 
         download_manager = DownloadManager(self._client)
 
@@ -275,6 +310,8 @@ class AsyncFineTuning:
         training_file: str,
         model: str,
         n_epochs: int = 1,
+        validation_file: str | None = "",
+        n_evals: int = 0,
         n_checkpoints: int | None = 1,
         batch_size: int | None = 32,
         learning_rate: float = 0.00001,
@@ -288,6 +325,8 @@ class AsyncFineTuning:
             training_file (str): File-ID of a file uploaded to the Together API
             model (str): Name of the base model to run fine-tune job on
             n_epochs (int, optional): Number of epochs for fine-tuning. Defaults to 1.
+            validation_file (str, optional): File-ID of a file uploaded to the Together API for validation.
+            n_evals (int, optional): Number of evaluation loops to run. Defaults to 0.
             n_checkpoints (int, optional): Number of checkpoints to save during fine-tuning.
                 Defaults to 1.
             batch_size (int, optional): Batch size for fine-tuning. Defaults to 32.
@@ -309,7 +348,9 @@ class AsyncFineTuning:
         parameter_payload = FinetuneRequest(
             model=model,
             training_file=training_file,
+            validation_file=validation_file,
             n_epochs=n_epochs,
+            n_evals=n_evals,
             n_checkpoints=n_checkpoints,
             batch_size=batch_size,
             learning_rate=learning_rate,
--- /dev/null
+++ together-1.2.9/src/together/resources/rerank.py
@@ -0,0 +1,124 @@
+from __future__ import annotations
+
+from typing import List, Dict, Any
+
+from together.abstract import api_requestor
+from together.together_response import TogetherResponse
+from together.types import (
+    RerankRequest,
+    RerankResponse,
+    TogetherClient,
+    TogetherRequest,
+)
+
+
+class Rerank:
+    def __init__(self, client: TogetherClient) -> None:
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        query: str,
+        documents: List[str] | List[Dict[str, Any]],
+        top_n: int | None = None,
+        return_documents: bool = False,
+        rank_fields: List[str] | None = None,
+    ) -> RerankResponse:
+        """
+        Method to rerank a list of documents against a query using a specified model.
+
+        Args:
+            model (str): The name of the model to query.
+            query (str): The search query to rank the documents against.
+            documents (List[str] | List[Dict[str, Any]]): List of documents to be reranked.
+            top_n (int | None): Number of top results to return.
+            return_documents (bool): Flag to indicate whether to return documents.
+            rank_fields (List[str] | None): Fields to be used for ranking the documents.
+
+        Returns:
+            RerankResponse: Object containing reranked scores and documents
+        """
+
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        parameter_payload = RerankRequest(
+            model=model,
+            query=query,
+            documents=documents,
+            top_n=top_n,
+            return_documents=return_documents,
+            rank_fields=rank_fields,
+        ).model_dump(exclude_none=True)
+
+        response, _, _ = requestor.request(
+            options=TogetherRequest(
+                method="POST",
+                url="rerank",
+                params=parameter_payload,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+
+        return RerankResponse(**response.data)
+
+
+class AsyncRerank:
+    def __init__(self, client: TogetherClient) -> None:
+        self._client = client
+
+    async def create(
+        self,
+        *,
+        model: str,
+        query: str,
+        documents: List[str] | List[Dict[str, Any]],
+        top_n: int | None = None,
+        return_documents: bool = False,
+        rank_fields: List[str] | None = None,
+    ) -> RerankResponse:
+        """
+        Async method to rerank a list of documents against a query using a specified model.
+
+        Args:
+            model (str): The name of the model to query.
+            query (str): The search query to rank the documents against.
+            documents (List[str] | List[Dict[str, Any]]): List of documents to be reranked.
+            top_n (int | None): Number of top results to return.
+            return_documents (bool): Flag to indicate whether to return documents.
+            rank_fields (List[str] | None): Fields to be used for ranking the documents.
+
+        Returns:
+            RerankResponse: Object containing reranked scores and documents
+        """
+
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        parameter_payload = RerankRequest(
+            model=model,
+            query=query,
+            documents=documents,
+            top_n=top_n,
+            return_documents=return_documents,
+            rank_fields=rank_fields,
+        ).model_dump(exclude_none=True)
+
+        response, _, _ = await requestor.arequest(
+            options=TogetherRequest(
+                method="POST",
+                url="rerank",
+                params=parameter_payload,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+
+        return RerankResponse(**response.data)
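
End to end, the new resource is used like this; a sketch with a placeholder model name (rerank-capable models can be discovered via the models listing, see the ModelType change below):

# Sketch: reranking documents against a query. The model name is a
# placeholder; substitute a model whose type is "rerank".
from together import Together

client = Together()

response = client.rerank.create(
    model="example/rerank-model-v1",  # placeholder
    query="What is the capital of France?",
    documents=[
        "Paris is the capital and largest city of France.",
        "Berlin is the capital of Germany.",
        "France is known for wine and cheese.",
    ],
    top_n=2,
)

for item in response.results or []:
    # Each result carries the index of the original document and its score.
    print(item.index, item.relevance_score)
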
--- together-1.2.7/src/together/types/__init__.py
+++ together-1.2.9/src/together/types/__init__.py
@@ -35,7 +35,10 @@ from together.types.images import (
     ImageResponse,
 )
 from together.types.models import ModelObject
-
+from together.types.rerank import (
+    RerankRequest,
+    RerankResponse,
+)
 
 __all__ = [
     "TogetherClient",
@@ -66,4 +69,6 @@ __all__ = [
     "TrainingType",
     "FullTrainingType",
     "LoRATrainingType",
+    "RerankRequest",
+    "RerankResponse",
 ]
--- together-1.2.7/src/together/types/finetune.py
+++ together-1.2.9/src/together/types/finetune.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 from enum import Enum
 from typing import List, Literal
 
-from pydantic import Field
+from pydantic import Field, validator, field_validator
 
 from together.types.abstract import BaseModel
 from together.types.common import (
@@ -61,6 +61,7 @@ class FinetuneEventType(str, Enum):
     CHECKPOINT_SAVE = "CHECKPOINT_SAVE"
     BILLING_LIMIT = "BILLING_LIMIT"
     EPOCH_COMPLETE = "EPOCH_COMPLETE"
+    EVAL_COMPLETE = "EVAL_COMPLETE"
     TRAINING_COMPLETE = "TRAINING_COMPLETE"
     MODEL_COMPRESSING = "COMPRESSING_MODEL"
     MODEL_COMPRESSION_COMPLETE = "MODEL_COMPRESSION_COMPLETE"
@@ -75,6 +76,12 @@ class FinetuneEventType(str, Enum):
     WARNING = "WARNING"
 
 
+class DownloadCheckpointType(Enum):
+    DEFAULT = "default"
+    MERGED = "merged"
+    ADAPTER = "adapter"
+
+
 class FinetuneEvent(BaseModel):
     """
     Fine-tune event type
@@ -123,8 +130,8 @@ class LoRATrainingType(TrainingType):
 
     lora_r: int
     lora_alpha: int
-    lora_dropout: float
-    lora_trainable_modules: str
+    lora_dropout: float = 0.0
+    lora_trainable_modules: str = "all-linear"
     type: str = "Lora"
 
 
@@ -135,6 +142,8 @@ class FinetuneRequest(BaseModel):
 
     # training file ID
     training_file: str
+    # validation file ID
+    validation_file: str | None = None
     # base model string
     model: str
     # number of epochs to train for
@@ -143,6 +152,8 @@ class FinetuneRequest(BaseModel):
     learning_rate: float
     # number of checkpoints to save
     n_checkpoints: int | None = None
+    # number of evaluation loops to run
+    n_evals: int | None = None
     # training batch size
     batch_size: int | None = None
     # up to 40 character suffix for output model name
@@ -173,6 +184,8 @@ class FinetuneResponse(BaseModel):
     n_epochs: int | None = None
     # number of checkpoints to save
     n_checkpoints: int | None = None
+    # number of evaluation loops
+    n_evals: int | None = None
     # training batch size
     batch_size: int | None = None
     # training learning rate
@@ -180,7 +193,7 @@ class FinetuneResponse(BaseModel):
     # number of steps between evals
     eval_steps: int | None = None
     # training type
-    training_type: FullTrainingType | LoRATrainingType | None = None
+    training_type: TrainingType | None = None
     # created/updated datetime stamps
     created_at: str | None = None
     updated_at: str | None = None
@@ -196,8 +209,14 @@ class FinetuneResponse(BaseModel):
     param_count: int | None = None
     # fine-tune job price
     total_price: int | None = None
+    # total number of training steps
+    total_steps: int | None = None
+    # number of steps completed (incrementing counter)
+    steps_completed: int | None = None
     # number of epochs completed (incrementing counter)
     epochs_completed: int | None = None
+    # number of evaluation loops completed (incrementing counter)
+    evals_completed: int | None = None
     # place in job queue (decrementing counter)
     queue_depth: int | None = None
     # weights & biases project name
@@ -208,6 +227,16 @@ class FinetuneResponse(BaseModel):
     training_file_num_lines: int | None = Field(None, alias="TrainingFileNumLines")
     training_file_size: int | None = Field(None, alias="TrainingFileSize")
 
+    @field_validator("training_type")
+    @classmethod
+    def validate_training_type(cls, v: TrainingType) -> TrainingType:
+        if v.type == "Full":
+            return FullTrainingType(**v.model_dump())
+        elif v.type == "Lora":
+            return LoRATrainingType(**v.model_dump())
+        else:
+            raise ValueError("Unknown training type")
+
 
 class FinetuneList(BaseModel):
     # object type
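
One change worth dwelling on: FinetuneResponse.training_type is now annotated with the base TrainingType, and the new field_validator re-dispatches the parsed value into the matching subclass via the type discriminator. A standalone sketch of that pattern, assuming pydantic v2 with extra="allow" so subclass-specific fields survive the initial parse into the base class (the classes below are simplified stand-ins, not the package's own models):

# Standalone sketch of the manual-discriminator pattern above (pydantic v2).
from __future__ import annotations

from pydantic import BaseModel, ConfigDict, field_validator


class TrainingType(BaseModel):
    # Keep unknown fields so subclass data survives parsing into the base class.
    model_config = ConfigDict(extra="allow")
    type: str


class FullTrainingType(TrainingType):
    type: str = "Full"


class LoRATrainingType(TrainingType):
    type: str = "Lora"
    lora_r: int = 8
    lora_alpha: int = 16


class Job(BaseModel):
    training_type: TrainingType | None = None

    @field_validator("training_type")
    @classmethod
    def validate_training_type(cls, v: TrainingType | None) -> TrainingType | None:
        if v is None:
            return v
        if v.type == "Full":
            return FullTrainingType(**v.model_dump())
        if v.type == "Lora":
            return LoRATrainingType(**v.model_dump())
        raise ValueError("Unknown training type")


job = Job(training_type={"type": "Lora", "lora_r": 16, "lora_alpha": 32})
assert isinstance(job.training_type, LoRATrainingType)
assert job.training_type.lora_r == 16
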
--- together-1.2.7/src/together/types/models.py
+++ together-1.2.9/src/together/types/models.py
@@ -14,6 +14,7 @@ class ModelType(str, Enum):
    IMAGE = "image"
    EMBEDDING = "embedding"
    MODERATION = "moderation"
+   RERANK = "rerank"


class PricingObject(BaseModel):
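
With the new enum member, rerank models can be filtered out of the catalog; a minimal sketch, assuming client.models.list() returns ModelObject entries whose type field uses this enum:

# Sketch: listing models of the new "rerank" type. Assumes models.list()
# returns ModelObject instances with a `type` field using ModelType.
from together import Together

client = Together()

rerank_models = [m for m in client.models.list() if m.type == "rerank"]
for m in rerank_models:
    print(m.id)
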
--- /dev/null
+++ together-1.2.9/src/together/types/rerank.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+from typing import List, Literal, Dict, Any
+
+from together.types.abstract import BaseModel
+from together.types.common import UsageData
+
+
+class RerankRequest(BaseModel):
+    # model to query
+    model: str
+    # search query
+    query: str
+    # list of documents
+    documents: List[str] | List[Dict[str, Any]]
+    # return top_n results
+    top_n: int | None = None
+    # boolean to return documents
+    return_documents: bool = False
+    # field selector for documents
+    rank_fields: List[str] | None = None
+
+
+class RerankChoicesData(BaseModel):
+    # response index
+    index: int
+    # relevance score
+    relevance_score: float
+    # reranked document (returned when return_documents=True)
+    document: Dict[str, Any] | None = None
+
+
+class RerankResponse(BaseModel):
+    # job id
+    id: str | None = None
+    # object type
+    object: Literal["rerank"] | None = None
+    # query model
+    model: str | None = None
+    # list of reranked results
+    results: List[RerankChoicesData] | None = None
+    # usage stats
+    usage: UsageData | None = None
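
Because the resource classes call model_dump(exclude_none=True) before POSTing (see resources/rerank.py above), optional fields left at None never reach the wire; a quick sketch:

# Sketch: how RerankRequest serializes before being POSTed to /rerank.
from together.types import RerankRequest

payload = RerankRequest(
    model="example/rerank-model-v1",  # placeholder model name
    query="best sci-fi novels",
    documents=["Dune", "Neuromancer", "The Dispossessed"],
).model_dump(exclude_none=True)

# top_n and rank_fields were None, so they are omitted; return_documents
# has a concrete default (False) and is kept:
print(payload)
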