together-1.3.5-py3-none-any.whl → together-1.3.9-py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published.
together/cli/api/finetune.py CHANGED
@@ -92,7 +92,7 @@ def fine_tuning(ctx: click.Context) -> None:
 @click.option(
     "--lora/--no-lora",
     type=bool,
-    default=False,
+    default=True,
     help="Whether to use LoRA adapters for fine-tuning",
 )
 @click.option("--lora-r", type=int, default=8, help="LoRA adapters' rank")
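Note the behavioral flip in this hunk: `--lora/--no-lora` now defaults to True, so the `fine-tuning create` CLI command submits a LoRA job unless `--no-lora` is passed explicitly; full-parameter fine-tuning now requires that flag.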
@@ -108,6 +108,9 @@ def fine_tuning(ctx: click.Context) -> None:
     "--suffix", type=str, default=None, help="Suffix for the fine-tuned model name"
 )
 @click.option("--wandb-api-key", type=str, default=None, help="Wandb API key")
+@click.option("--wandb-base-url", type=str, default=None, help="Wandb base URL")
+@click.option("--wandb-project-name", type=str, default=None, help="Wandb project name")
+@click.option("--wandb-name", type=str, default=None, help="Wandb run name")
 @click.option(
     "--confirm",
     "-y",
@@ -144,6 +147,9 @@ def create(
     lora_trainable_modules: str,
     suffix: str,
     wandb_api_key: str,
+    wandb_base_url: str,
+    wandb_project_name: str,
+    wandb_name: str,
     confirm: bool,
     train_on_inputs: bool | Literal["auto"],
 ) -> None:
@@ -170,6 +176,9 @@ def create(
         lora_trainable_modules=lora_trainable_modules,
         suffix=suffix,
         wandb_api_key=wandb_api_key,
+        wandb_base_url=wandb_base_url,
+        wandb_project_name=wandb_project_name,
+        wandb_name=wandb_name,
         train_on_inputs=train_on_inputs,
     )
 
together/resources/finetune.py CHANGED
@@ -48,6 +48,9 @@ def createFinetuneRequest(
     lora_trainable_modules: str | None = "all-linear",
     suffix: str | None = None,
     wandb_api_key: str | None = None,
+    wandb_base_url: str | None = None,
+    wandb_project_name: str | None = None,
+    wandb_name: str | None = None,
     train_on_inputs: bool | Literal["auto"] = "auto",
 ) -> FinetuneRequest:
     if batch_size == "max":
@@ -118,6 +121,9 @@ def createFinetuneRequest(
         training_type=training_type,
         suffix=suffix,
         wandb_key=wandb_api_key,
+        wandb_base_url=wandb_base_url,
+        wandb_project_name=wandb_project_name,
+        wandb_name=wandb_name,
         train_on_inputs=train_on_inputs,
     )
 
@@ -143,13 +149,16 @@ class FineTuning:
         warmup_ratio: float = 0.0,
         max_grad_norm: float = 1.0,
         weight_decay: float = 0.0,
-        lora: bool = False,
+        lora: bool = True,
         lora_r: int | None = None,
         lora_dropout: float | None = 0,
         lora_alpha: float | None = None,
         lora_trainable_modules: str | None = "all-linear",
         suffix: str | None = None,
         wandb_api_key: str | None = None,
+        wandb_base_url: str | None = None,
+        wandb_project_name: str | None = None,
+        wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
         train_on_inputs: bool | Literal["auto"] = "auto",
@@ -182,6 +191,12 @@ class FineTuning:
                 Defaults to None.
             wandb_api_key (str, optional): API key for Weights & Biases integration.
                 Defaults to None.
+            wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                Defaults to None.
+            wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                Defaults to None.
+            wandb_name (str, optional): Run name for Weights & Biases integration.
+                Defaults to None.
             verbose (bool, optional): whether to print the job parameters before submitting a request.
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
@@ -225,6 +240,9 @@ class FineTuning:
             lora_trainable_modules=lora_trainable_modules,
             suffix=suffix,
             wandb_api_key=wandb_api_key,
+            wandb_base_url=wandb_base_url,
+            wandb_project_name=wandb_project_name,
+            wandb_name=wandb_name,
             train_on_inputs=train_on_inputs,
         )
 
@@ -472,13 +490,16 @@ class AsyncFineTuning:
         warmup_ratio: float = 0.0,
         max_grad_norm: float = 1.0,
         weight_decay: float = 0.0,
-        lora: bool = False,
+        lora: bool = True,
         lora_r: int | None = None,
         lora_dropout: float | None = 0,
         lora_alpha: float | None = None,
         lora_trainable_modules: str | None = "all-linear",
         suffix: str | None = None,
         wandb_api_key: str | None = None,
+        wandb_base_url: str | None = None,
+        wandb_project_name: str | None = None,
+        wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
         train_on_inputs: bool | Literal["auto"] = "auto",
@@ -511,6 +532,12 @@ class AsyncFineTuning:
                 Defaults to None.
             wandb_api_key (str, optional): API key for Weights & Biases integration.
                 Defaults to None.
+            wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                Defaults to None.
+            wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                Defaults to None.
+            wandb_name (str, optional): Run name for Weights & Biases integration.
+                Defaults to None.
             verbose (bool, optional): whether to print the job parameters before submitting a request.
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
@@ -554,6 +581,9 @@ class AsyncFineTuning:
             lora_trainable_modules=lora_trainable_modules,
             suffix=suffix,
             wandb_api_key=wandb_api_key,
+            wandb_base_url=wandb_base_url,
+            wandb_project_name=wandb_project_name,
+            wandb_name=wandb_name,
             train_on_inputs=train_on_inputs,
         )
 
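Taken together, these hunks thread three new Weights & Biases options through `createFinetuneRequest` and both client classes (note that `wandb_api_key` maps to the request field `wandb_key`, while the new parameters keep their names). A minimal usage sketch, assuming the synchronous client exposes this method as `client.fine_tuning.create` as elsewhere in 1.3.x; the file ID, model name, and W&B values are placeholders:

    from together import Together

    client = Together()  # assumes TOGETHER_API_KEY is set in the environment

    job = client.fine_tuning.create(
        training_file="file-...",                    # placeholder uploaded-file ID
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",  # placeholder model
        lora=True,                                   # now the default in this version range
        wandb_api_key="...",                         # existing option, sent as wandb_key
        wandb_base_url="https://wandb.example.org",  # new: e.g. a self-hosted W&B instance
        wandb_project_name="my-finetunes",           # new: W&B project to log under
        wandb_name="llama-8b-lora-r8",               # new: W&B run name
    )
    print(job.id)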
together/types/chat_completions.py CHANGED
@@ -27,6 +27,7 @@ class MessageRole(str, Enum):
 
 class ResponseFormatType(str, Enum):
     JSON_OBJECT = "json_object"
+    JSON_SCHEMA = "json_schema"
 
 
 class FunctionCall(BaseModel):
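The new enum member points at structured-output support in chat completions. A hedged sketch follows; the exact request payload (the `schema` key and which models accept it) is an assumption modeled on the existing `json_object` usage, not something this diff confirms:

    from together import Together

    client = Together()
    response = client.chat.completions.create(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",  # placeholder model
        messages=[{"role": "user", "content": "Reply with my name and age: Bob, 35"}],
        response_format={
            "type": "json_schema",  # newly valid ResponseFormatType value
            "schema": {             # assumed key, by analogy with json_object usage
                "type": "object",
                "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
                "required": ["name", "age"],
            },
        },
    )
    print(response.choices[0].message.content)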
together/types/finetune.py CHANGED
@@ -168,7 +168,15 @@ class FinetuneRequest(BaseModel):
     suffix: str | None = None
     # weights & biases api key
     wandb_key: str | None = None
+    # weights & biases base url
+    wandb_base_url: str | None = None
+    # wandb project name
+    wandb_project_name: str | None = None
+    # wandb run name
+    wandb_name: str | None = None
+    # training type
     training_type: FullTrainingType | LoRATrainingType | None = None
+    # train on inputs
     train_on_inputs: StrictBool | Literal["auto"] = "auto"
 
 
@@ -236,8 +244,12 @@ class FinetuneResponse(BaseModel):
     evals_completed: int | None = None
     # place in job queue (decrementing counter)
     queue_depth: int | None = None
-    # weights & biases project name
+    # weights & biases base url
+    wandb_base_url: str | None = None
+    # wandb project name
     wandb_project_name: str | None = None
+    # wandb run name
+    wandb_name: str | None = None
     # weights & biases job url
     wandb_url: str | None = None
     # training file metadata
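The response model gains matching read-only fields. A short sketch of inspecting them on a retrieved job, continuing from the client above and assuming the existing `retrieve` method; all of these fields are optional and may be None:

    job = client.fine_tuning.retrieve("ft-...")  # placeholder job ID
    print(job.wandb_base_url, job.wandb_project_name, job.wandb_name, job.wandb_url)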
together/utils/files.py CHANGED
@@ -120,7 +120,8 @@ def _check_jsonl(file: Path) -> Dict[str, Any]:
             raise InvalidFileFormatError(
                 message=(
                     f"Error parsing file. Invalid format on line {idx + 1} of the input file. "
-                    'Example of valid json: {"text": "my sample string"}. '
+                    "Datasets must follow text, conversational, or instruction format. For more"
+                    "information, see https://docs.together.ai/docs/fine-tuning-data-preparation"
                 ),
                 line_number=idx + 1,
                 error_source="line_type",
@@ -142,6 +143,18 @@ def _check_jsonl(file: Path) -> Dict[str, Any]:
                     error_source="format",
                 )
 
+            # Check that there are no extra columns
+            for column in json_line:
+                if (
+                    column
+                    not in JSONL_REQUIRED_COLUMNS_MAP[possible_format]
+                ):
+                    raise InvalidFileFormatError(
+                        message=f'Found extra column "{column}" in the line {idx + 1}.',
+                        line_number=idx + 1,
+                        error_source="format",
+                    )
+
             if current_format is None:
                 raise InvalidFileFormatError(
                     message=(
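The validator is now stricter in two ways: the parse-error message points at the documented dataset formats instead of a single JSON example, and any key outside `JSONL_REQUIRED_COLUMNS_MAP[possible_format]` is rejected. A sketch of a file that should pass, using the conversational format; the other documented formats (a "text" field, or prompt/completion pairs) follow the same one-format-per-file rule:

    import json

    rows = [
        {"messages": [{"role": "user", "content": "hi"},
                      {"role": "assistant", "content": "hello"}]},
        {"messages": [{"role": "user", "content": "2+2?"},
                      {"role": "assistant", "content": "4"}]},
    ]
    with open("train.jsonl", "w") as f:
        for row in rows:
            f.write(json.dumps(row) + "\n")
    # Adding any extra key (e.g. {"messages": [...], "metadata": ...}) now raises
    # InvalidFileFormatError with error_source="format" during validation.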
together-1.3.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: together
-Version: 1.3.5
+Version: 1.3.9
 Summary: Python client for Together's Cloud Platform!
 Home-page: https://github.com/togethercomputer/together-python
 License: Apache-2.0
together-1.3.9.dist-info/RECORD CHANGED
@@ -6,7 +6,7 @@ together/cli/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 together/cli/api/chat.py,sha256=2PHRb-9T-lUEKhUJFtc7SxJv3shCVx40gq_8pzfsewM,9234
 together/cli/api/completions.py,sha256=l-Zw5t7hojL3w8xd_mitS2NRB72i5Z0xwkzH0rT5XMc,4263
 together/cli/api/files.py,sha256=QLYEXRkY8J2Gg1SbTCtzGfoTMvosoeACNK83L_oLubs,3397
-together/cli/api/finetune.py,sha256=78dJs_hF_gDWQjUT5R3v518GmNQnnB0Qt8CyU68e5jY,12760
+together/cli/api/finetune.py,sha256=dcpGJBXLrsZc5EwRExyfsg0puWiook-muTQD9RP9L40,13198
 together/cli/api/images.py,sha256=GADSeaNUHUVMtWovmccGuKc28IJ9E_v4vAEwYHJhu5o,2645
 together/cli/api/models.py,sha256=xWEzu8ZpxM_Pz9KEjRPRVuv_v22RayYZ4QcgiezT5tE,1126
 together/cli/api/utils.py,sha256=IuqYWPnLI38_Bqd7lj8V_SnGdYc59pRmMbQmciS4FsM,1326
@@ -29,31 +29,31 @@ together/resources/chat/completions.py,sha256=jYiNZsWa8RyEacL0VgxWj1egJ857oU4nxI
 together/resources/completions.py,sha256=5Wa-ZjPCxRcam6CDe7KgGYlTA7yJZMmd5TrRgGCL_ug,11726
 together/resources/embeddings.py,sha256=PTvLb82yjG_-iQOyuhsilp77Fr7gZ0o6WD2KeRnKoxs,2675
 together/resources/files.py,sha256=bnPbaF25e4InBRPvHwXHXT-oSX1Z1sZRsnQW5wq82U4,4990
-together/resources/finetune.py,sha256=UcbPAZ0b_WR3ks754n5fPzDjraNQHSkulaKGmQQZ2Zs,25516
+together/resources/finetune.py,sha256=0UiN2jxxV_lQ9QSFKDjAioXVgPCIzb7biIJbcQj1oq4,26998
 together/resources/images.py,sha256=LQUjKPaFxWTqOAPnyF1Pp7Rz4NLOYhmoKwshpYiprEM,4923
 together/resources/models.py,sha256=2dtHhXAqTDOOpwSbYLzWcKTC0-m2Szlb7LDYvp7Jr4w,1786
 together/resources/rerank.py,sha256=3Ju_aRSyZ1s_3zCSNZnSnEJErUVmt2xa3M8z1nvejMA,3931
 together/together_response.py,sha256=MhczUCPem93cjX-A1TOAUrRj3sO-o3SLcEcTsZgVzQI,1319
 together/types/__init__.py,sha256=jEnnepzUeeYgCNTQIi4EWKaOEsZKYp0vEqzYmP8bK5o,1863
 together/types/abstract.py,sha256=1lFQI_3WjsR_t1128AeKW0aTk6EiM6Gh1J3ZuyLLPao,642
-together/types/chat_completions.py,sha256=d24F3VfT7uVnmaEk7Fn-O7qkGUg_AQQzR7vPwlXVDXw,4882
+together/types/chat_completions.py,sha256=tIHQzB1N1DsUl3WojsrfErqxVmcI_eweGVp_gbf6dp8,4914
 together/types/common.py,sha256=4ZeIgqGioqhIC-nNxY90czNPp-kAqboMulw6-1z6ShM,1511
 together/types/completions.py,sha256=o3FR5ixsTUj-a3pmOUzbSQg-hESVhpqrC9UD__VCqr4,2971
 together/types/embeddings.py,sha256=J7grkYYn7xhqeKaBO2T-8XQRtHhkzYzymovtGdIUK5A,751
 together/types/error.py,sha256=OVlCs3cx_2WhZK4JzHT8SQyRIIqKOP1AZQ4y1PydjAE,370
 together/types/files.py,sha256=-rEUfsV6f2vZB9NrFxT4_933ubsDIUNkPB-3OlOFk4A,1954
-together/types/finetune.py,sha256=17IM5A__GnT6hgMClMz0vESohWI_qh5Eeq3iR9w1ODg,8704
+together/types/finetune.py,sha256=u4rZne7dd0F3jfQ9iXxIVG405kfr65rlJiEMkEZrfWY,9052
 together/types/images.py,sha256=xnC-FZGdZU30WSFTybfGneWxb-kj0ZGufJsgHtB8j0k,980
 together/types/models.py,sha256=K9Om3cCFexy7qzRSEXUj7gpCy1CVb1hHx7MGG-hvTLw,1035
 together/types/rerank.py,sha256=qZfuXOn7MZ6ly8hpJ_MZ7OU_Bi1-cgYNSB20Wja8Qkk,1061
 together/utils/__init__.py,sha256=n1kmLiaExT9YOKT5ye--dC4tW2qcHeicKX0GR86U640,698
 together/utils/_log.py,sha256=5IYNI-jYzxyIS-pUvhb0vE_Muo3MA7GgBhsu66TKP2w,1951
 together/utils/api_helpers.py,sha256=RSF7SRhbjHzroMOSWAXscflByM1r1ta_1SpxkAT22iE,2407
-together/utils/files.py,sha256=rBCwez0i0bcJIgQQsgd-ROgcakR5NfSmUreYPQoE5Nk,13005
+together/utils/files.py,sha256=EJaziXezArb4cKtNodqaOsNJT-FWb1qIfMmSCFqPPm8,13745
 together/utils/tools.py,sha256=3-lXWP3cBCzOVSZg9tr5zOT1jaVeKAKVWxO2fcXZTh8,1788
 together/version.py,sha256=p03ivHyE0SyWU4jAnRTBi_sOwywVWoZPU4g2gzRgG-Y,126
-together-1.3.5.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-together-1.3.5.dist-info/METADATA,sha256=4naWLEoh8icjBGlIVvJSXlNjtwFGdgKpWi-hVEXDo-E,11829
-together-1.3.5.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-together-1.3.5.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
-together-1.3.5.dist-info/RECORD,,
+together-1.3.9.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+together-1.3.9.dist-info/METADATA,sha256=wcvLZbn15o_pE0BJcJ9xtEdMn49EOsG8KYoypxtacxY,11829
+together-1.3.9.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+together-1.3.9.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
+together-1.3.9.dist-info/RECORD,,