together 1.2.2__tar.gz → 1.2.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. {together-1.2.2 → together-1.2.3}/PKG-INFO +1 -1
  2. {together-1.2.2 → together-1.2.3}/pyproject.toml +1 -1
  3. {together-1.2.2 → together-1.2.3}/src/together/cli/api/finetune.py +42 -1
  4. {together-1.2.2 → together-1.2.3}/src/together/resources/finetune.py +23 -0
  5. {together-1.2.2 → together-1.2.3}/src/together/types/__init__.py +6 -0
  6. {together-1.2.2 → together-1.2.3}/src/together/types/finetune.py +33 -5
  7. {together-1.2.2 → together-1.2.3}/LICENSE +0 -0
  8. {together-1.2.2 → together-1.2.3}/README.md +0 -0
  9. {together-1.2.2 → together-1.2.3}/src/together/__init__.py +0 -0
  10. {together-1.2.2 → together-1.2.3}/src/together/abstract/__init__.py +0 -0
  11. {together-1.2.2 → together-1.2.3}/src/together/abstract/api_requestor.py +0 -0
  12. {together-1.2.2 → together-1.2.3}/src/together/cli/__init__.py +0 -0
  13. {together-1.2.2 → together-1.2.3}/src/together/cli/api/__init__.py +0 -0
  14. {together-1.2.2 → together-1.2.3}/src/together/cli/api/chat.py +0 -0
  15. {together-1.2.2 → together-1.2.3}/src/together/cli/api/completions.py +0 -0
  16. {together-1.2.2 → together-1.2.3}/src/together/cli/api/files.py +0 -0
  17. {together-1.2.2 → together-1.2.3}/src/together/cli/api/images.py +0 -0
  18. {together-1.2.2 → together-1.2.3}/src/together/cli/api/models.py +0 -0
  19. {together-1.2.2 → together-1.2.3}/src/together/cli/cli.py +0 -0
  20. {together-1.2.2 → together-1.2.3}/src/together/client.py +0 -0
  21. {together-1.2.2 → together-1.2.3}/src/together/constants.py +0 -0
  22. {together-1.2.2 → together-1.2.3}/src/together/error.py +0 -0
  23. {together-1.2.2 → together-1.2.3}/src/together/filemanager.py +0 -0
  24. {together-1.2.2 → together-1.2.3}/src/together/legacy/__init__.py +0 -0
  25. {together-1.2.2 → together-1.2.3}/src/together/legacy/base.py +0 -0
  26. {together-1.2.2 → together-1.2.3}/src/together/legacy/complete.py +0 -0
  27. {together-1.2.2 → together-1.2.3}/src/together/legacy/embeddings.py +0 -0
  28. {together-1.2.2 → together-1.2.3}/src/together/legacy/files.py +0 -0
  29. {together-1.2.2 → together-1.2.3}/src/together/legacy/finetune.py +0 -0
  30. {together-1.2.2 → together-1.2.3}/src/together/legacy/images.py +0 -0
  31. {together-1.2.2 → together-1.2.3}/src/together/legacy/models.py +0 -0
  32. {together-1.2.2 → together-1.2.3}/src/together/resources/__init__.py +0 -0
  33. {together-1.2.2 → together-1.2.3}/src/together/resources/chat/__init__.py +0 -0
  34. {together-1.2.2 → together-1.2.3}/src/together/resources/chat/completions.py +0 -0
  35. {together-1.2.2 → together-1.2.3}/src/together/resources/completions.py +0 -0
  36. {together-1.2.2 → together-1.2.3}/src/together/resources/embeddings.py +0 -0
  37. {together-1.2.2 → together-1.2.3}/src/together/resources/files.py +0 -0
  38. {together-1.2.2 → together-1.2.3}/src/together/resources/images.py +0 -0
  39. {together-1.2.2 → together-1.2.3}/src/together/resources/models.py +0 -0
  40. {together-1.2.2 → together-1.2.3}/src/together/together_response.py +0 -0
  41. {together-1.2.2 → together-1.2.3}/src/together/types/abstract.py +0 -0
  42. {together-1.2.2 → together-1.2.3}/src/together/types/chat_completions.py +0 -0
  43. {together-1.2.2 → together-1.2.3}/src/together/types/common.py +0 -0
  44. {together-1.2.2 → together-1.2.3}/src/together/types/completions.py +0 -0
  45. {together-1.2.2 → together-1.2.3}/src/together/types/embeddings.py +0 -0
  46. {together-1.2.2 → together-1.2.3}/src/together/types/error.py +0 -0
  47. {together-1.2.2 → together-1.2.3}/src/together/types/files.py +0 -0
  48. {together-1.2.2 → together-1.2.3}/src/together/types/images.py +0 -0
  49. {together-1.2.2 → together-1.2.3}/src/together/types/models.py +0 -0
  50. {together-1.2.2 → together-1.2.3}/src/together/utils/__init__.py +0 -0
  51. {together-1.2.2 → together-1.2.3}/src/together/utils/_log.py +0 -0
  52. {together-1.2.2 → together-1.2.3}/src/together/utils/api_helpers.py +0 -0
  53. {together-1.2.2 → together-1.2.3}/src/together/utils/files.py +0 -0
  54. {together-1.2.2 → together-1.2.3}/src/together/utils/tools.py +0 -0
  55. {together-1.2.2 → together-1.2.3}/src/together/version.py +0 -0
{together-1.2.2 → together-1.2.3}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: together
- Version: 1.2.2
+ Version: 1.2.3
  Summary: Python client for Together's Cloud Platform!
  Home-page: https://github.com/togethercomputer/together-python
  License: Apache-2.0
{together-1.2.2 → together-1.2.3}/pyproject.toml
@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"

  [tool.poetry]
  name = "together"
- version = "1.2.2"
+ version = "1.2.3"
  authors = [
      "Together AI <support@together.ai>"
  ]
{together-1.2.2 → together-1.2.3}/src/together/cli/api/finetune.py
@@ -2,6 +2,7 @@ import json
  from textwrap import wrap

  import click
+ from click.core import ParameterSource  # type: ignore[attr-defined]
  from tabulate import tabulate

  from together import Together
@@ -26,7 +27,22 @@ def fine_tuning(ctx: click.Context) -> None:
      "--n-checkpoints", type=int, default=1, help="Number of checkpoints to save"
  )
  @click.option("--batch-size", type=int, default=32, help="Train batch size")
- @click.option("--learning-rate", type=float, default=3e-5, help="Learning rate")
+ @click.option("--learning-rate", type=float, default=1e-5, help="Learning rate")
+ @click.option(
+     "--lora/--no-lora",
+     type=bool,
+     default=False,
+     help="Whether to use LoRA adapters for fine-tuning",
+ )
+ @click.option("--lora-r", type=int, default=8, help="LoRA adapters' rank")
+ @click.option("--lora-dropout", type=float, default=0, help="LoRA adapters' dropout")
+ @click.option("--lora-alpha", type=float, default=8, help="LoRA adapters' alpha")
+ @click.option(
+     "--lora-trainable-modules",
+     type=str,
+     default="all-linear",
+     help="Trainable modules for LoRA adapters. For example, 'all-linear', 'q_proj,v_proj'",
+ )
  @click.option(
      "--suffix", type=str, default=None, help="Suffix for the fine-tuned model name"
  )
@@ -39,12 +55,32 @@ def create(
      n_checkpoints: int,
      batch_size: int,
      learning_rate: float,
+     lora: bool,
+     lora_r: int,
+     lora_dropout: float,
+     lora_alpha: float,
+     lora_trainable_modules: str,
      suffix: str,
      wandb_api_key: str,
  ) -> None:
      """Start fine-tuning"""
      client: Together = ctx.obj

+     if lora:
+         learning_rate_source = click.get_current_context().get_parameter_source(  # type: ignore[attr-defined]
+             "learning_rate"
+         )
+         if learning_rate_source == ParameterSource.DEFAULT:
+             learning_rate = 1e-3
+     else:
+         for param in ["lora_r", "lora_dropout", "lora_alpha", "lora_trainable_modules"]:
+             param_source = click.get_current_context().get_parameter_source(param)  # type: ignore[attr-defined]
+             if param_source != ParameterSource.DEFAULT:
+                 raise click.BadParameter(
+                     f"You set LoRA parameter `{param}` for a full fine-tuning job. "
+                     f"Please change the job type with --lora or remove `{param}` from the arguments"
+                 )
+
      response = client.fine_tuning.create(
          training_file=training_file,
          model=model,
@@ -52,6 +88,11 @@ def create(
          n_checkpoints=n_checkpoints,
          batch_size=batch_size,
          learning_rate=learning_rate,
+         lora=lora,
+         lora_r=lora_r,
+         lora_dropout=lora_dropout,
+         lora_alpha=lora_alpha,
+         lora_trainable_modules=lora_trainable_modules,
          suffix=suffix,
          wandb_api_key=wandb_api_key,
      )
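The guard added to create() above only overrides the learning-rate default when the flag was left unset. As a rough, self-contained sketch of the same click mechanism (a hypothetical toy command, not part of this package), Context.get_parameter_source() is what distinguishes an explicit --learning-rate from the option default:

    # Standalone toy command (not part of together) illustrating the click mechanism used above:
    # Context.get_parameter_source() reports whether a value came from the command line
    # or from the option's default.
    import click
    from click.core import ParameterSource

    @click.command()
    @click.option("--learning-rate", type=float, default=1e-5)
    @click.option("--lora/--no-lora", default=False)
    def train(learning_rate: float, lora: bool) -> None:
        source = click.get_current_context().get_parameter_source("learning_rate")
        if lora and source == ParameterSource.DEFAULT:
            # Same idea as the change above: raise the default LR for LoRA jobs,
            # but only when the user did not pass --learning-rate explicitly.
            learning_rate = 1e-3
        click.echo(f"learning_rate={learning_rate} lora={lora}")

    if __name__ == "__main__":
        train()

Run with --lora alone this prints learning_rate=0.001; passing --lora --learning-rate 2e-5 keeps 2e-5, mirroring the behaviour of the new CLI guard.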
{together-1.2.2 → together-1.2.3}/src/together/resources/finetune.py
@@ -11,8 +11,11 @@ from together.types import (
      FinetuneListEvents,
      FinetuneRequest,
      FinetuneResponse,
+     FullTrainingType,
+     LoRATrainingType,
      TogetherClient,
      TogetherRequest,
+     TrainingType,
  )
  from together.utils import normalize_key

@@ -30,6 +33,11 @@ class FineTuning:
          n_checkpoints: int | None = 1,
          batch_size: int | None = 32,
          learning_rate: float | None = 0.00001,
+         lora: bool = True,
+         lora_r: int | None = 8,
+         lora_dropout: float | None = 0,
+         lora_alpha: float | None = 8,
+         lora_trainable_modules: str | None = "all-linear",
          suffix: str | None = None,
          wandb_api_key: str | None = None,
      ) -> FinetuneResponse:
@@ -45,6 +53,11 @@ class FineTuning:
              batch_size (int, optional): Batch size for fine-tuning. Defaults to 32.
              learning_rate (float, optional): Learning rate multiplier to use for training
                  Defaults to 0.00001.
+             lora (bool, optional): Whether to use LoRA adapters. Defaults to True.
+             lora_r (int, optional): Rank of LoRA adapters. Defaults to 8.
+             lora_dropout (float, optional): Dropout rate for LoRA adapters. Defaults to 0.
+             lora_alpha (float, optional): Alpha for LoRA adapters. Defaults to 8.
+             lora_trainable_modules (str, optional): Trainable modules for LoRA adapters. Defaults to "all-linear".
              suffix (str, optional): Up to 40 character suffix that will be added to your fine-tuned model name.
                  Defaults to None.
              wandb_api_key (str, optional): API key for Weights & Biases integration.
@@ -58,6 +71,15 @@ class FineTuning:
              client=self._client,
          )

+         training_type: TrainingType = FullTrainingType()
+         if lora:
+             training_type = LoRATrainingType(
+                 lora_r=lora_r,
+                 lora_alpha=lora_alpha,
+                 lora_dropout=lora_dropout,
+                 lora_trainable_modules=lora_trainable_modules,
+             )
+
          parameter_payload = FinetuneRequest(
              model=model,
              training_file=training_file,
@@ -65,6 +87,7 @@ class FineTuning:
              n_checkpoints=n_checkpoints,
              batch_size=batch_size,
              learning_rate=learning_rate,
+             training_type=training_type,
              suffix=suffix,
              wandb_key=wandb_api_key,
          ).model_dump()
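With the keyword arguments added above, a LoRA job can be requested directly from the Python client. A minimal usage sketch (the training file ID and model name below are placeholders, and TOGETHER_API_KEY must be set in the environment):

    # Sketch of the extended create() signature; file ID and model name are placeholders.
    from together import Together

    client = Together()  # reads TOGETHER_API_KEY from the environment

    response = client.fine_tuning.create(
        training_file="file-xxxxxxxx",        # placeholder: ID of an uploaded training file
        model="meta-llama/Meta-Llama-3-8B",   # placeholder base model name
        lora=True,                            # request LoRA adapters instead of full fine-tuning
        lora_r=8,
        lora_alpha=8,
        lora_dropout=0.0,
        lora_trainable_modules="all-linear",
    )
    print(response.output_name, response.training_type)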
{together-1.2.2 → together-1.2.3}/src/together/types/__init__.py
@@ -26,6 +26,9 @@ from together.types.finetune import (
      FinetuneListEvents,
      FinetuneRequest,
      FinetuneResponse,
+     FullTrainingType,
+     LoRATrainingType,
+     TrainingType,
  )
  from together.types.images import (
      ImageRequest,
@@ -60,4 +63,7 @@ __all__ = [
      "ImageRequest",
      "ImageResponse",
      "ModelObject",
+     "TrainingType",
+     "FullTrainingType",
+     "LoRATrainingType",
  ]
{together-1.2.2 → together-1.2.3}/src/together/types/finetune.py
@@ -100,6 +100,34 @@ class FinetuneEvent(BaseModel):
      hash: str | None = None


+ class TrainingType(BaseModel):
+     """
+     Abstract training type
+     """
+
+     type: str
+
+
+ class FullTrainingType(TrainingType):
+     """
+     Training type for full fine-tuning
+     """
+
+     type: str = "Full"
+
+
+ class LoRATrainingType(TrainingType):
+     """
+     Training type for LoRA adapters training
+     """
+
+     lora_r: int
+     lora_alpha: int
+     lora_dropout: float
+     lora_trainable_modules: str
+     type: str = "Lora"
+
+
  class FinetuneRequest(BaseModel):
      """
      Fine-tune request type
@@ -121,6 +149,7 @@ class FinetuneRequest(BaseModel):
      suffix: str | None = None
      # weights & biases api key
      wandb_key: str | None = None
+     training_type: FullTrainingType | LoRATrainingType | None = None


  class FinetuneResponse(BaseModel):
@@ -138,6 +167,8 @@ class FinetuneResponse(BaseModel):
      model: str | None = None
      # output model name
      output_name: str | None = Field(None, alias="model_output_name")
+     # adapter output name
+     adapter_output_name: str | None = None
      # number of epochs
      n_epochs: int | None = None
      # number of checkpoints to save
@@ -148,11 +179,8 @@ class FinetuneResponse(BaseModel):
      learning_rate: float | None = None
      # number of steps between evals
      eval_steps: int | None = None
-     # is LoRA finetune boolean
-     lora: bool | None = None
-     lora_r: int | None = None
-     lora_alpha: int | None = None
-     lora_dropout: int | None = None
+     # training type
+     training_type: FullTrainingType | LoRATrainingType | None = None
      # created/updated datetime stamps
      created_at: str | None = None
      updated_at: str | None = None
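For reference, the new pydantic models can also be exercised on their own; a small sketch of how the training_type payload that FineTuning.create() builds would serialize:

    # Sketch: building the training_type payload by hand and inspecting its serialization.
    from together.types import FullTrainingType, LoRATrainingType

    full = FullTrainingType()
    lora = LoRATrainingType(
        lora_r=8,
        lora_alpha=8,
        lora_dropout=0.0,
        lora_trainable_modules="all-linear",
    )

    print(full.model_dump())  # {'type': 'Full'}
    print(lora.model_dump())  # 'type': 'Lora' plus the four lora_* fields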