together-1.3.4-py3-none-any.whl → together-1.3.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

together/cli/api/finetune.py CHANGED
@@ -65,12 +65,30 @@ def fine_tuning(ctx: click.Context) -> None:
 )
 @click.option("--batch-size", type=INT_WITH_MAX, default="max", help="Train batch size")
 @click.option("--learning-rate", type=float, default=1e-5, help="Learning rate")
+@click.option(
+    "--min-lr-ratio",
+    type=float,
+    default=0.0,
+    help="The ratio of the final learning rate to the peak learning rate",
+)
 @click.option(
     "--warmup-ratio",
     type=float,
     default=0.0,
     help="Warmup ratio for learning rate scheduler.",
 )
+@click.option(
+    "--max-grad-norm",
+    type=float,
+    default=1.0,
+    help="Max gradient norm to be used for gradient clipping. Set to 0 to disable.",
+)
+@click.option(
+    "--weight-decay",
+    type=float,
+    default=0.0,
+    help="Weight decay",
+)
 @click.option(
     "--lora/--no-lora",
     type=bool,
@@ -90,6 +108,9 @@ def fine_tuning(ctx: click.Context) -> None:
     "--suffix", type=str, default=None, help="Suffix for the fine-tuned model name"
 )
 @click.option("--wandb-api-key", type=str, default=None, help="Wandb API key")
+@click.option("--wandb-base-url", type=str, default=None, help="Wandb base URL")
+@click.option("--wandb-project-name", type=str, default=None, help="Wandb project name")
+@click.option("--wandb-name", type=str, default=None, help="Wandb run name")
 @click.option(
     "--confirm",
     "-y",
@@ -115,7 +136,10 @@ def create(
     n_checkpoints: int,
     batch_size: int | Literal["max"],
     learning_rate: float,
+    min_lr_ratio: float,
     warmup_ratio: float,
+    max_grad_norm: float,
+    weight_decay: float,
     lora: bool,
     lora_r: int,
     lora_dropout: float,
@@ -123,6 +147,9 @@ def create(
     lora_trainable_modules: str,
     suffix: str,
     wandb_api_key: str,
+    wandb_base_url: str,
+    wandb_project_name: str,
+    wandb_name: str,
     confirm: bool,
     train_on_inputs: bool | Literal["auto"],
 ) -> None:
@@ -138,7 +165,10 @@ def create(
         n_checkpoints=n_checkpoints,
         batch_size=batch_size,
         learning_rate=learning_rate,
+        min_lr_ratio=min_lr_ratio,
         warmup_ratio=warmup_ratio,
+        max_grad_norm=max_grad_norm,
+        weight_decay=weight_decay,
         lora=lora,
         lora_r=lora_r,
         lora_dropout=lora_dropout,
@@ -146,6 +176,9 @@ def create(
         lora_trainable_modules=lora_trainable_modules,
         suffix=suffix,
         wandb_api_key=wandb_api_key,
+        wandb_base_url=wandb_base_url,
+        wandb_project_name=wandb_project_name,
+        wandb_name=wandb_name,
         train_on_inputs=train_on_inputs,
     )
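
For users of the Python client rather than the CLI, the same knobs surface on fine_tuning.create (see the resources/finetune.py hunks below). A minimal sketch, assuming TOGETHER_API_KEY is set and using placeholder file and model ids:

from together import Together

client = Together()  # reads TOGETHER_API_KEY from the environment

job = client.fine_tuning.create(
    training_file="file-abc123",  # placeholder: id of an uploaded training file
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",  # placeholder model name
    learning_rate=1e-5,
    min_lr_ratio=0.1,    # final LR is 10% of the peak LR
    max_grad_norm=1.0,   # gradient clipping threshold; 0 disables clipping
    weight_decay=0.01,
)
print(job.id)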

together/resources/finetune.py CHANGED
@@ -20,6 +20,8 @@ from together.types import (
     TogetherClient,
     TogetherRequest,
     TrainingType,
+    FinetuneLRScheduler,
+    FinetuneLinearLRSchedulerArgs,
 )
 from together.types.finetune import DownloadCheckpointType
 from together.utils import log_warn_once, normalize_key
@@ -35,7 +37,10 @@ def createFinetuneRequest(
     n_checkpoints: int | None = 1,
     batch_size: int | Literal["max"] = "max",
     learning_rate: float | None = 0.00001,
-    warmup_ratio: float | None = 0.0,
+    min_lr_ratio: float = 0.0,
+    warmup_ratio: float = 0.0,
+    max_grad_norm: float = 1.0,
+    weight_decay: float = 0.0,
     lora: bool = False,
     lora_r: int | None = None,
     lora_dropout: float | None = 0,
@@ -43,6 +48,9 @@ def createFinetuneRequest(
     lora_trainable_modules: str | None = "all-linear",
     suffix: str | None = None,
     wandb_api_key: str | None = None,
+    wandb_base_url: str | None = None,
+    wandb_project_name: str | None = None,
+    wandb_name: str | None = None,
     train_on_inputs: bool | Literal["auto"] = "auto",
 ) -> FinetuneRequest:
     if batch_size == "max":
@@ -83,6 +91,20 @@ def createFinetuneRequest(
     if warmup_ratio > 1 or warmup_ratio < 0:
         raise ValueError("Warmup ratio should be between 0 and 1")
 
+    if min_lr_ratio is not None and (min_lr_ratio > 1 or min_lr_ratio < 0):
+        raise ValueError("Min learning rate ratio should be between 0 and 1")
+
+    if max_grad_norm < 0:
+        raise ValueError("Max gradient norm should be non-negative")
+
+    if weight_decay is not None and (weight_decay < 0):
+        raise ValueError("Weight decay should be non-negative")
+
+    lrScheduler = FinetuneLRScheduler(
+        lr_scheduler_type="linear",
+        lr_scheduler_args=FinetuneLinearLRSchedulerArgs(min_lr_ratio=min_lr_ratio),
+    )
+
     finetune_request = FinetuneRequest(
         model=model,
         training_file=training_file,
@@ -92,10 +114,16 @@ def createFinetuneRequest(
         n_checkpoints=n_checkpoints,
         batch_size=batch_size,
         learning_rate=learning_rate,
+        lr_scheduler=lrScheduler,
         warmup_ratio=warmup_ratio,
+        max_grad_norm=max_grad_norm,
+        weight_decay=weight_decay,
         training_type=training_type,
         suffix=suffix,
         wandb_key=wandb_api_key,
+        wandb_base_url=wandb_base_url,
+        wandb_project_name=wandb_project_name,
+        wandb_name=wandb_name,
         train_on_inputs=train_on_inputs,
     )
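
createFinetuneRequest now always attaches a linear scheduler whose only argument is min_lr_ratio, the fraction of the peak learning rate that training decays to. The schedule itself runs server-side and is not part of this package, so the following is only an illustrative sketch of a linear decay with that floor:

def linear_lr(step: int, total_steps: int, peak_lr: float, min_lr_ratio: float = 0.0) -> float:
    # Illustration only: decay linearly from peak_lr down to peak_lr * min_lr_ratio.
    floor = peak_lr * min_lr_ratio
    progress = min(step / max(total_steps, 1), 1.0)
    return peak_lr - (peak_lr - floor) * progress

print(linear_lr(step=1000, total_steps=1000, peak_lr=1e-5, min_lr_ratio=0.1))  # ~1e-06, the floor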
@@ -117,7 +145,10 @@ class FineTuning:
         n_checkpoints: int | None = 1,
         batch_size: int | Literal["max"] = "max",
         learning_rate: float | None = 0.00001,
-        warmup_ratio: float | None = 0.0,
+        min_lr_ratio: float = 0.0,
+        warmup_ratio: float = 0.0,
+        max_grad_norm: float = 1.0,
+        weight_decay: float = 0.0,
         lora: bool = False,
         lora_r: int | None = None,
         lora_dropout: float | None = 0,
@@ -125,6 +156,9 @@ class FineTuning:
         lora_trainable_modules: str | None = "all-linear",
         suffix: str | None = None,
         wandb_api_key: str | None = None,
+        wandb_base_url: str | None = None,
+        wandb_project_name: str | None = None,
+        wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
         train_on_inputs: bool | Literal["auto"] = "auto",
@@ -143,7 +177,11 @@ class FineTuning:
             batch_size (int or "max"): Batch size for fine-tuning. Defaults to max.
             learning_rate (float, optional): Learning rate multiplier to use for training
                 Defaults to 0.00001.
+            min_lr_ratio (float, optional): Min learning rate ratio of the initial learning rate for
+                the learning rate scheduler. Defaults to 0.0.
             warmup_ratio (float, optional): Warmup ratio for learning rate scheduler.
+            max_grad_norm (float, optional): Max gradient norm. Defaults to 1.0, set to 0 to disable.
+            weight_decay (float, optional): Weight decay. Defaults to 0.0.
             lora (bool, optional): Whether to use LoRA adapters. Defaults to True.
             lora_r (int, optional): Rank of LoRA adapters. Defaults to 8.
             lora_dropout (float, optional): Dropout rate for LoRA adapters. Defaults to 0.
@@ -153,6 +191,12 @@ class FineTuning:
                 Defaults to None.
             wandb_api_key (str, optional): API key for Weights & Biases integration.
                 Defaults to None.
+            wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                Defaults to None.
+            wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                Defaults to None.
+            wandb_name (str, optional): Run name for Weights & Biases integration.
+                Defaults to None.
             verbose (bool, optional): whether to print the job parameters before submitting a request.
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
@@ -185,7 +229,10 @@ class FineTuning:
             n_checkpoints=n_checkpoints,
             batch_size=batch_size,
             learning_rate=learning_rate,
+            min_lr_ratio=min_lr_ratio,
             warmup_ratio=warmup_ratio,
+            max_grad_norm=max_grad_norm,
+            weight_decay=weight_decay,
             lora=lora,
             lora_r=lora_r,
             lora_dropout=lora_dropout,
@@ -193,6 +240,9 @@ class FineTuning:
             lora_trainable_modules=lora_trainable_modules,
             suffix=suffix,
             wandb_api_key=wandb_api_key,
+            wandb_base_url=wandb_base_url,
+            wandb_project_name=wandb_project_name,
+            wandb_name=wandb_name,
             train_on_inputs=train_on_inputs,
         )
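
The new Weights & Biases parameters let a job report to a chosen project and run name, and to a non-default W&B host. A sketch under the same placeholder assumptions as above:

from together import Together

client = Together()

job = client.fine_tuning.create(
    training_file="file-abc123",  # placeholder file id
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",  # placeholder model name
    wandb_api_key="...",                    # W&B API key for the integration
    wandb_base_url="https://api.wandb.ai",  # only needed for a self-hosted W&B deployment
    wandb_project_name="together-finetunes",
    wandb_name="llama-8b-run-1",
)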
@@ -436,7 +486,10 @@ class AsyncFineTuning:
         n_checkpoints: int | None = 1,
         batch_size: int | Literal["max"] = "max",
         learning_rate: float | None = 0.00001,
-        warmup_ratio: float | None = 0.0,
+        min_lr_ratio: float = 0.0,
+        warmup_ratio: float = 0.0,
+        max_grad_norm: float = 1.0,
+        weight_decay: float = 0.0,
         lora: bool = False,
         lora_r: int | None = None,
         lora_dropout: float | None = 0,
@@ -444,6 +497,9 @@ class AsyncFineTuning:
         lora_trainable_modules: str | None = "all-linear",
         suffix: str | None = None,
         wandb_api_key: str | None = None,
+        wandb_base_url: str | None = None,
+        wandb_project_name: str | None = None,
+        wandb_name: str | None = None,
         verbose: bool = False,
         model_limits: FinetuneTrainingLimits | None = None,
         train_on_inputs: bool | Literal["auto"] = "auto",
@@ -462,7 +518,11 @@ class AsyncFineTuning:
             batch_size (int, optional): Batch size for fine-tuning. Defaults to max.
             learning_rate (float, optional): Learning rate multiplier to use for training
                 Defaults to 0.00001.
+            min_lr_ratio (float, optional): Min learning rate ratio of the initial learning rate for
+                the learning rate scheduler. Defaults to 0.0.
             warmup_ratio (float, optional): Warmup ratio for learning rate scheduler.
+            max_grad_norm (float, optional): Max gradient norm. Defaults to 1.0, set to 0 to disable.
+            weight_decay (float, optional): Weight decay. Defaults to 0.0.
             lora (bool, optional): Whether to use LoRA adapters. Defaults to True.
             lora_r (int, optional): Rank of LoRA adapters. Defaults to 8.
             lora_dropout (float, optional): Dropout rate for LoRA adapters. Defaults to 0.
@@ -472,6 +532,12 @@ class AsyncFineTuning:
                 Defaults to None.
             wandb_api_key (str, optional): API key for Weights & Biases integration.
                 Defaults to None.
+            wandb_base_url (str, optional): Base URL for Weights & Biases integration.
+                Defaults to None.
+            wandb_project_name (str, optional): Project name for Weights & Biases integration.
+                Defaults to None.
+            wandb_name (str, optional): Run name for Weights & Biases integration.
+                Defaults to None.
             verbose (bool, optional): whether to print the job parameters before submitting a request.
                 Defaults to False.
             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
@@ -504,7 +570,10 @@ class AsyncFineTuning:
             n_checkpoints=n_checkpoints,
             batch_size=batch_size,
             learning_rate=learning_rate,
+            min_lr_ratio=min_lr_ratio,
             warmup_ratio=warmup_ratio,
+            max_grad_norm=max_grad_norm,
+            weight_decay=weight_decay,
             lora=lora,
             lora_r=lora_r,
             lora_dropout=lora_dropout,
@@ -512,6 +581,9 @@ class AsyncFineTuning:
             lora_trainable_modules=lora_trainable_modules,
             suffix=suffix,
             wandb_api_key=wandb_api_key,
+            wandb_base_url=wandb_base_url,
+            wandb_project_name=wandb_project_name,
+            wandb_name=wandb_name,
             train_on_inputs=train_on_inputs,
         )
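
AsyncFineTuning.create gains the same parameters; a sketch with the async client, using the same placeholder ids as above:

import asyncio

from together import AsyncTogether

async def main() -> None:
    client = AsyncTogether()  # reads TOGETHER_API_KEY from the environment
    job = await client.fine_tuning.create(
        training_file="file-abc123",  # placeholder file id
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",  # placeholder model name
        min_lr_ratio=0.1,
        max_grad_norm=1.0,
        weight_decay=0.01,
        wandb_project_name="together-finetunes",
    )
    print(job.id)

asyncio.run(main())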

together/types/__init__.py CHANGED
@@ -30,6 +30,8 @@ from together.types.finetune import (
     LoRATrainingType,
     TrainingType,
     FinetuneTrainingLimits,
+    FinetuneLRScheduler,
+    FinetuneLinearLRSchedulerArgs,
 )
 from together.types.images import (
     ImageRequest,
@@ -57,6 +59,8 @@ __all__ = [
     "FinetuneList",
     "FinetuneListEvents",
     "FinetuneDownloadResult",
+    "FinetuneLRScheduler",
+    "FinetuneLinearLRSchedulerArgs",
     "FileRequest",
     "FileResponse",
     "FileList",

together/types/chat_completions.py CHANGED
@@ -27,6 +27,7 @@ class MessageRole(str, Enum):
 
 class ResponseFormatType(str, Enum):
     JSON_OBJECT = "json_object"
+    JSON_SCHEMA = "json_schema"
 
 
 class FunctionCall(BaseModel):
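
The chat types gain a json_schema response format alongside json_object. A minimal check of the new enum member; the exact request payload the API expects for schema-constrained output is not shown in this diff, so the usage hint in the comments is an assumption:

from together.types.chat_completions import ResponseFormatType

assert ResponseFormatType.JSON_SCHEMA.value == "json_schema"  # new alongside JSON_OBJECT

# Assumed usage (shape not confirmed by this diff): pass a response_format dict
# with type "json_schema" plus a JSON schema to chat.completions.create().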

together/types/finetune.py CHANGED
@@ -150,8 +150,14 @@ class FinetuneRequest(BaseModel):
     n_epochs: int
     # training learning rate
     learning_rate: float
+    # learning rate scheduler type and args
+    lr_scheduler: FinetuneLRScheduler | None = None
     # learning rate warmup ratio
     warmup_ratio: float
+    # max gradient norm
+    max_grad_norm: float
+    # weight decay
+    weight_decay: float
     # number of checkpoints to save
     n_checkpoints: int | None = None
     # number of evaluation loops to run
@@ -162,7 +168,15 @@ class FinetuneRequest(BaseModel):
     suffix: str | None = None
     # weights & biases api key
     wandb_key: str | None = None
+    # weights & biases base url
+    wandb_base_url: str | None = None
+    # wandb project name
+    wandb_project_name: str | None = None
+    # wandb run name
+    wandb_name: str | None = None
+    # training type
     training_type: FullTrainingType | LoRATrainingType | None = None
+    # train on inputs
     train_on_inputs: StrictBool | Literal["auto"] = "auto"
 
 
@@ -193,8 +207,14 @@ class FinetuneResponse(BaseModel):
     batch_size: int | None = None
     # training learning rate
     learning_rate: float | None = None
+    # learning rate scheduler type and args
+    lr_scheduler: FinetuneLRScheduler | None = None
     # learning rate warmup ratio
     warmup_ratio: float | None = None
+    # max gradient norm
+    max_grad_norm: float | None = None
+    # weight decay
+    weight_decay: float | None = None
     # number of steps between evals
     eval_steps: int | None = None
     # training type
@@ -224,8 +244,12 @@ class FinetuneResponse(BaseModel):
     evals_completed: int | None = None
     # place in job queue (decrementing counter)
     queue_depth: int | None = None
-    # weights & biases project name
+    # weights & biases base url
+    wandb_base_url: str | None = None
+    # wandb project name
     wandb_project_name: str | None = None
+    # wandb run name
+    wandb_name: str | None = None
     # weights & biases job url
     wandb_url: str | None = None
     # training file metadata
@@ -287,3 +311,12 @@ class FinetuneTrainingLimits(BaseModel):
     min_learning_rate: float
     full_training: FinetuneFullTrainingLimits | None = None
     lora_training: FinetuneLoraTrainingLimits | None = None
+
+
+class FinetuneLRScheduler(BaseModel):
+    lr_scheduler_type: str
+    lr_scheduler_args: FinetuneLinearLRSchedulerArgs | None = None
+
+
+class FinetuneLinearLRSchedulerArgs(BaseModel):
+    min_lr_ratio: float | None = 0.0
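
The two new pydantic models added at the bottom of together/types/finetune.py can be constructed directly; this mirrors what createFinetuneRequest builds from min_lr_ratio:

from together.types.finetune import FinetuneLinearLRSchedulerArgs, FinetuneLRScheduler

scheduler = FinetuneLRScheduler(
    lr_scheduler_type="linear",
    lr_scheduler_args=FinetuneLinearLRSchedulerArgs(min_lr_ratio=0.1),
)
print(scheduler.lr_scheduler_args.min_lr_ratio)  # 0.1
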
together/utils/files.py CHANGED
@@ -120,7 +120,8 @@ def _check_jsonl(file: Path) -> Dict[str, Any]:
             raise InvalidFileFormatError(
                 message=(
                     f"Error parsing file. Invalid format on line {idx + 1} of the input file. "
-                    'Example of valid json: {"text": "my sample string"}. '
+                    "Datasets must follow text, conversational, or instruction format. For more"
+                    "information, see https://docs.together.ai/docs/fine-tuning-data-preparation"
                 ),
                 line_number=idx + 1,
                 error_source="line_type",
@@ -142,6 +143,18 @@ def _check_jsonl(file: Path) -> Dict[str, Any]:
                     error_source="format",
                 )
 
+            # Check that there are no extra columns
+            for column in json_line:
+                if (
+                    column
+                    not in JSONL_REQUIRED_COLUMNS_MAP[possible_format]
+                ):
+                    raise InvalidFileFormatError(
+                        message=f'Found extra column "{column}" in the line {idx + 1}.',
+                        line_number=idx + 1,
+                        error_source="format",
+                    )
+
             if current_format is None:
                 raise InvalidFileFormatError(
                     message=(
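
In practice this means a dataset line carrying keys beyond the required columns for its detected format is now rejected during validation. A sketch, assuming the public check_file helper exported from together.utils:

import json

from together.utils import check_file  # assumed public wrapper around the JSONL checks

with open("sample.jsonl", "w") as f:
    f.write(json.dumps({"text": "hello world"}) + "\n")                    # valid text format
    f.write(json.dumps({"text": "hello again", "label": "extra"}) + "\n")  # extra column

report = check_file("sample.jsonl")
print(report)  # expected to flag line 2 for the extra "label" column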

together-1.3.4.dist-info/METADATA → together-1.3.8.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: together
-Version: 1.3.4
+Version: 1.3.8
 Summary: Python client for Together's Cloud Platform!
 Home-page: https://github.com/togethercomputer/together-python
 License: Apache-2.0

together-1.3.4.dist-info/RECORD → together-1.3.8.dist-info/RECORD CHANGED
@@ -6,7 +6,7 @@ together/cli/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 together/cli/api/chat.py,sha256=2PHRb-9T-lUEKhUJFtc7SxJv3shCVx40gq_8pzfsewM,9234
 together/cli/api/completions.py,sha256=l-Zw5t7hojL3w8xd_mitS2NRB72i5Z0xwkzH0rT5XMc,4263
 together/cli/api/files.py,sha256=QLYEXRkY8J2Gg1SbTCtzGfoTMvosoeACNK83L_oLubs,3397
-together/cli/api/finetune.py,sha256=vl-0cTubZER7wKEPFTFfhe8_Ry_Squ4PypPzR0VHClg,12175
+together/cli/api/finetune.py,sha256=3jVAHyqQF3prlJSmqRXaNPSTV-K-dJgJsWidPf4j0p8,13199
 together/cli/api/images.py,sha256=GADSeaNUHUVMtWovmccGuKc28IJ9E_v4vAEwYHJhu5o,2645
 together/cli/api/models.py,sha256=xWEzu8ZpxM_Pz9KEjRPRVuv_v22RayYZ4QcgiezT5tE,1126
 together/cli/api/utils.py,sha256=IuqYWPnLI38_Bqd7lj8V_SnGdYc59pRmMbQmciS4FsM,1326
@@ -29,31 +29,31 @@ together/resources/chat/completions.py,sha256=jYiNZsWa8RyEacL0VgxWj1egJ857oU4nxI
 together/resources/completions.py,sha256=5Wa-ZjPCxRcam6CDe7KgGYlTA7yJZMmd5TrRgGCL_ug,11726
 together/resources/embeddings.py,sha256=PTvLb82yjG_-iQOyuhsilp77Fr7gZ0o6WD2KeRnKoxs,2675
 together/resources/files.py,sha256=bnPbaF25e4InBRPvHwXHXT-oSX1Z1sZRsnQW5wq82U4,4990
-together/resources/finetune.py,sha256=K_jLNeApduKQXtz9rN7V_tG_IZdfwGrmf_zYgJNX9aA,23609
+together/resources/finetune.py,sha256=3Axh_8ZQ5fM35QkDX9O-r-exYeP8hhhSDhLucdDnSus,27000
 together/resources/images.py,sha256=LQUjKPaFxWTqOAPnyF1Pp7Rz4NLOYhmoKwshpYiprEM,4923
 together/resources/models.py,sha256=2dtHhXAqTDOOpwSbYLzWcKTC0-m2Szlb7LDYvp7Jr4w,1786
 together/resources/rerank.py,sha256=3Ju_aRSyZ1s_3zCSNZnSnEJErUVmt2xa3M8z1nvejMA,3931
 together/together_response.py,sha256=MhczUCPem93cjX-A1TOAUrRj3sO-o3SLcEcTsZgVzQI,1319
-together/types/__init__.py,sha256=oHZCMC0H3j1ykf7ZRgxIU0QBA534EMpfKqRaa9SdgOo,1739
+together/types/__init__.py,sha256=jEnnepzUeeYgCNTQIi4EWKaOEsZKYp0vEqzYmP8bK5o,1863
 together/types/abstract.py,sha256=1lFQI_3WjsR_t1128AeKW0aTk6EiM6Gh1J3ZuyLLPao,642
-together/types/chat_completions.py,sha256=d24F3VfT7uVnmaEk7Fn-O7qkGUg_AQQzR7vPwlXVDXw,4882
+together/types/chat_completions.py,sha256=tIHQzB1N1DsUl3WojsrfErqxVmcI_eweGVp_gbf6dp8,4914
 together/types/common.py,sha256=4ZeIgqGioqhIC-nNxY90czNPp-kAqboMulw6-1z6ShM,1511
 together/types/completions.py,sha256=o3FR5ixsTUj-a3pmOUzbSQg-hESVhpqrC9UD__VCqr4,2971
 together/types/embeddings.py,sha256=J7grkYYn7xhqeKaBO2T-8XQRtHhkzYzymovtGdIUK5A,751
 together/types/error.py,sha256=OVlCs3cx_2WhZK4JzHT8SQyRIIqKOP1AZQ4y1PydjAE,370
 together/types/files.py,sha256=-rEUfsV6f2vZB9NrFxT4_933ubsDIUNkPB-3OlOFk4A,1954
-together/types/finetune.py,sha256=1-EZ-HB1wA2fYX2Gt8u-nVPy6UgVyNQwh4aYzvo8eic,8079
+together/types/finetune.py,sha256=u4rZne7dd0F3jfQ9iXxIVG405kfr65rlJiEMkEZrfWY,9052
 together/types/images.py,sha256=xnC-FZGdZU30WSFTybfGneWxb-kj0ZGufJsgHtB8j0k,980
 together/types/models.py,sha256=K9Om3cCFexy7qzRSEXUj7gpCy1CVb1hHx7MGG-hvTLw,1035
 together/types/rerank.py,sha256=qZfuXOn7MZ6ly8hpJ_MZ7OU_Bi1-cgYNSB20Wja8Qkk,1061
 together/utils/__init__.py,sha256=n1kmLiaExT9YOKT5ye--dC4tW2qcHeicKX0GR86U640,698
 together/utils/_log.py,sha256=5IYNI-jYzxyIS-pUvhb0vE_Muo3MA7GgBhsu66TKP2w,1951
 together/utils/api_helpers.py,sha256=RSF7SRhbjHzroMOSWAXscflByM1r1ta_1SpxkAT22iE,2407
-together/utils/files.py,sha256=rBCwez0i0bcJIgQQsgd-ROgcakR5NfSmUreYPQoE5Nk,13005
+together/utils/files.py,sha256=EJaziXezArb4cKtNodqaOsNJT-FWb1qIfMmSCFqPPm8,13745
 together/utils/tools.py,sha256=3-lXWP3cBCzOVSZg9tr5zOT1jaVeKAKVWxO2fcXZTh8,1788
 together/version.py,sha256=p03ivHyE0SyWU4jAnRTBi_sOwywVWoZPU4g2gzRgG-Y,126
-together-1.3.4.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-together-1.3.4.dist-info/METADATA,sha256=4z5uVKF141cdQiwBWGVlpBFvkMAOHb5RDExHDh9UtFg,11829
-together-1.3.4.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-together-1.3.4.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
-together-1.3.4.dist-info/RECORD,,
+together-1.3.8.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+together-1.3.8.dist-info/METADATA,sha256=0wiDHxcIOd4YX2-1-h8EIu9O-9PV8A0pxD0q3QEywqY,11829
+together-1.3.8.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+together-1.3.8.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
+together-1.3.8.dist-info/RECORD,,