together-1.5.11-py3-none-any.whl → together-1.5.14-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
together/cli/api/finetune.py CHANGED
@@ -82,7 +82,7 @@ def fine_tuning(ctx: click.Context) -> None:
 @click.option(
     "--lr-scheduler-type",
     type=click.Choice(["linear", "cosine"]),
-    default="linear",
+    default="cosine",
     help="Learning rate scheduler type",
 )
 @click.option(
@@ -142,6 +142,30 @@ def fine_tuning(ctx: click.Context) -> None:
     default=0.1,
     help="Beta parameter for DPO training (only used when '--training-method' is 'dpo')",
 )
+@click.option(
+    "--dpo-normalize-logratios-by-length",
+    type=bool,
+    default=False,
+    help=(
+        "Whether to normalize logratios by sample length "
+        "(only used when '--training-method' is 'dpo')"
+    ),
+)
+@click.option(
+    "--rpo-alpha",
+    type=float,
+    default=0.0,
+    help=(
+        "RPO alpha parameter of DPO training to include NLL in the loss "
+        "(only used when '--training-method' is 'dpo')"
+    ),
+)
+@click.option(
+    "--simpo-gamma",
+    type=float,
+    default=0.1,
+    help="SimPO gamma parameter (only used when '--training-method' is 'dpo')",
+)
 @click.option(
     "--suffix",
     "-s",
@@ -206,6 +230,9 @@ def create(
     train_on_inputs: bool | Literal["auto"],
     training_method: str,
     dpo_beta: float,
+    dpo_normalize_logratios_by_length: bool,
+    rpo_alpha: float,
+    simpo_gamma: float,
     from_checkpoint: str,
 ) -> None:
     """Start fine-tuning"""
@@ -239,6 +266,9 @@ def create(
         train_on_inputs=train_on_inputs,
         training_method=training_method,
         dpo_beta=dpo_beta,
+        dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
+        rpo_alpha=rpo_alpha,
+        simpo_gamma=simpo_gamma,
         from_checkpoint=from_checkpoint,
     )
together/client.py CHANGED
@@ -23,6 +23,7 @@ class Together:
     fine_tuning: resources.FineTuning
     rerank: resources.Rerank
     audio: resources.Audio
+    batches: resources.Batches
     code_interpreter: CodeInterpreter
 
     # client options
@@ -90,6 +91,7 @@ class Together:
         self.audio = resources.Audio(self.client)
         self.endpoints = resources.Endpoints(self.client)
         self.code_interpreter = CodeInterpreter(self.client)
+        self.batches = resources.Batches(self.client)
 
 
 class AsyncTogether:
@@ -102,7 +104,7 @@ class AsyncTogether:
     fine_tuning: resources.AsyncFineTuning
     rerank: resources.AsyncRerank
     code_interpreter: CodeInterpreter
-
+    batches: resources.AsyncBatches
     # client options
     client: TogetherClient
 
@@ -166,6 +168,7 @@ class AsyncTogether:
         self.fine_tuning = resources.AsyncFineTuning(self.client)
         self.rerank = resources.AsyncRerank(self.client)
         self.code_interpreter = CodeInterpreter(self.client)
+        self.batches = resources.AsyncBatches(self.client)
 
 
 Client = Together
together/resources/__init__.py CHANGED
@@ -8,6 +8,7 @@ from together.resources.finetune import AsyncFineTuning, FineTuning
 from together.resources.images import AsyncImages, Images
 from together.resources.models import AsyncModels, Models
 from together.resources.rerank import AsyncRerank, Rerank
+from together.resources.batch import Batches, AsyncBatches
 
 
 __all__ = [
@@ -31,4 +32,6 @@ __all__ = [
     "Audio",
     "AsyncEndpoints",
     "Endpoints",
+    "Batches",
+    "AsyncBatches",
 ]
together/resources/batch.py ADDED
@@ -0,0 +1,136 @@
+from __future__ import annotations
+
+from typing import List
+
+from together.abstract import api_requestor
+from together.together_response import TogetherResponse
+from together.types import (
+    TogetherClient,
+    TogetherRequest,
+    BatchJob,
+)
+
+
+class Batches:
+    def __init__(self, client: TogetherClient) -> None:
+        self._client = client
+
+    def create_batch(self, file_id: str, endpoint: str) -> BatchJob:
+
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        parameter_payload = {
+            "input_file_id": file_id,
+            "endpoint": endpoint,
+            "completion_window": "24h",
+        }
+
+        response, _, _ = requestor.request(
+            options=TogetherRequest(
+                method="POST",
+                url=f"batches",
+                params=parameter_payload,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+        response_body = response.data.get("job", {})
+        return BatchJob(**response_body)
+
+    def get_batch(self, batch_job_id: str) -> BatchJob:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=TogetherRequest(
+                method="GET",
+                url=f"batches/{batch_job_id}",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+        return BatchJob(**response.data)
+
+    def list_batches(self) -> List[BatchJob]:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=TogetherRequest(
+                method="GET",
+                url="batches",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+        jobs = response.data or []
+        return [BatchJob(**job) for job in jobs]
+
+
+class AsyncBatches:
+    def __init__(self, client: TogetherClient) -> None:
+        self._client = client
+
+    async def create_batch(self, file_id: str, endpoint: str) -> BatchJob:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        parameter_payload = {
+            "input_file_id": file_id,
+            "endpoint": endpoint,
+            "completion_window": "24h",
+        }
+
+        response, _, _ = await requestor.arequest(
+            options=TogetherRequest(
+                method="POST",
+                url=f"batches",
+                params=parameter_payload,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+        response_body = response.data.get("job", {})
+        return BatchJob(**response_body)
+
+    async def get_batch(self, batch_job_id: str) -> BatchJob:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=TogetherRequest(
+                method="GET",
+                url=f"batches/{batch_job_id}",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+        return BatchJob(**response.data)
+
+    async def list_batches(self) -> List[BatchJob]:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=TogetherRequest(
+                method="GET",
+                url="batches",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, TogetherResponse)
+        jobs = response.data or []
+        return [BatchJob(**job) for job in jobs]
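
A hedged usage sketch of the new Batches resource, based only on the methods shown above; the file id is a placeholder, and the input file (purpose "batch-api") is assumed to have been uploaded already.

from together import Together

client = Together()  # assumes TOGETHER_API_KEY is set in the environment

# Submit a batch of requests against the chat completions endpoint
# (the value matches BatchEndpoint.CHAT_COMPLETIONS in together/types/batch.py).
job = client.batches.create_batch(
    file_id="file-abc123",            # placeholder input file id
    endpoint="/v1/chat/completions",
)

# Poll a single job, or list all jobs for the account.
job = client.batches.get_batch(job.id)
print(job.status, job.progress)
all_jobs = client.batches.list_batches()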
together/resources/files.py CHANGED
@@ -32,7 +32,7 @@ class Files:
     ) -> FileResponse:
         upload_manager = UploadManager(self._client)
 
-        if check:
+        if check and purpose == FilePurpose.FineTune:
            report_dict = check_file(file)
            if not report_dict["is_check_passed"]:
                raise FileTypeError(
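
One practical consequence of the new `check and purpose == FilePurpose.FineTune` guard: uploads for the new batch-api purpose skip the fine-tuning format checker. A sketch under the assumption that this method is the SDK's `files.upload` (the method name is not shown in the hunk above):

from together import Together

client = Together()
# Fine-tune uploads still run check_file; batch-api uploads now bypass it.
client.files.upload(file="batch_requests.jsonl", purpose="batch-api")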
together/resources/finetune.py CHANGED
@@ -53,7 +53,7 @@ def create_finetune_request(
     n_checkpoints: int | None = 1,
     batch_size: int | Literal["max"] = "max",
     learning_rate: float | None = 0.00001,
-    lr_scheduler_type: Literal["linear", "cosine"] = "linear",
+    lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
     min_lr_ratio: float = 0.0,
     scheduler_num_cycles: float = 0.5,
     warmup_ratio: float | None = None,
@@ -72,6 +72,9 @@ def create_finetune_request(
     train_on_inputs: bool | Literal["auto"] | None = None,
     training_method: str = "sft",
     dpo_beta: float | None = None,
+    dpo_normalize_logratios_by_length: bool = False,
+    rpo_alpha: float | None = None,
+    simpo_gamma: float | None = None,
     from_checkpoint: str | None = None,
 ) -> FinetuneRequest:
     if model is not None and from_checkpoint is not None:
@@ -182,6 +185,21 @@ def create_finetune_request(
 
     if dpo_beta is not None and training_method != "dpo":
         raise ValueError("dpo_beta is only supported for DPO training")
+    if dpo_normalize_logratios_by_length and training_method != "dpo":
+        raise ValueError(
+            "dpo_normalize_logratios_by_length=True is only supported for DPO training"
+        )
+    if rpo_alpha is not None:
+        if training_method != "dpo":
+            raise ValueError("rpo_alpha is only supported for DPO training")
+        if not rpo_alpha >= 0.0:
+            raise ValueError(f"rpo_alpha should be non-negative (got {rpo_alpha})")
+
+    if simpo_gamma is not None:
+        if training_method != "dpo":
+            raise ValueError("simpo_gamma is only supported for DPO training")
+        if not simpo_gamma >= 0.0:
+            raise ValueError(f"simpo_gamma should be non-negative (got {simpo_gamma})")
 
     lr_scheduler: FinetuneLRScheduler
     if lr_scheduler_type == "cosine":
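
A standalone sketch of the new guards (a mirror of the logic above, not the SDK function itself), showing which combinations are rejected:

def check_dpo_extras(training_method: str,
                     rpo_alpha: float | None,
                     simpo_gamma: float | None) -> None:
    # Mirrors the validation added above: both knobs are DPO-only and
    # must be non-negative when given.
    for name, value in (("rpo_alpha", rpo_alpha), ("simpo_gamma", simpo_gamma)):
        if value is None:
            continue
        if training_method != "dpo":
            raise ValueError(f"{name} is only supported for DPO training")
        if not value >= 0.0:
            raise ValueError(f"{name} should be non-negative (got {value})")

check_dpo_extras("dpo", rpo_alpha=0.5, simpo_gamma=None)    # passes
# check_dpo_extras("sft", rpo_alpha=0.5, simpo_gamma=None)  # would raise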
@@ -204,7 +222,24 @@ def create_finetune_request(
     if training_method == "sft":
         training_method_cls = TrainingMethodSFT(train_on_inputs=train_on_inputs)
     elif training_method == "dpo":
-        training_method_cls = TrainingMethodDPO(dpo_beta=dpo_beta)
+        if simpo_gamma is not None and simpo_gamma > 0:
+            dpo_reference_free = True
+            dpo_normalize_logratios_by_length = True
+            rprint(
+                f"Parameter simpo_gamma was set to {simpo_gamma}. "
+                "SimPO training detected. Reference logits will not be used "
+                "and length normalization of log-probabilities will be enabled."
+            )
+        else:
+            dpo_reference_free = False
+
+        training_method_cls = TrainingMethodDPO(
+            dpo_beta=dpo_beta,
+            dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
+            dpo_reference_free=dpo_reference_free,
+            rpo_alpha=rpo_alpha,
+            simpo_gamma=simpo_gamma,
+        )
 
     finetune_request = FinetuneRequest(
         model=model,
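
The branch above encodes a small decision rule: any positive simpo_gamma flips the job into SimPO mode, i.e. reference-free DPO with length-normalized logratios. Note that the CLI default of `--simpo-gamma 0.1` would take this path. A minimal sketch of the rule in isolation:

def resolve_dpo_mode(simpo_gamma: float | None,
                     normalize_by_length: bool) -> tuple[bool, bool]:
    """Return (dpo_reference_free, dpo_normalize_logratios_by_length)."""
    if simpo_gamma is not None and simpo_gamma > 0:
        # SimPO: drop the reference model and normalize by sample length.
        return True, True
    return False, normalize_by_length

assert resolve_dpo_mode(0.1, False) == (True, True)     # SimPO detected
assert resolve_dpo_mode(None, False) == (False, False)  # plain DPO
assert resolve_dpo_mode(0.0, True) == (False, True)     # gamma of 0 is not SimPO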
@@ -281,7 +316,7 @@ class FineTuning:
         n_checkpoints: int | None = 1,
         batch_size: int | Literal["max"] = "max",
         learning_rate: float | None = 0.00001,
-        lr_scheduler_type: Literal["linear", "cosine"] = "linear",
+        lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
         min_lr_ratio: float = 0.0,
         scheduler_num_cycles: float = 0.5,
         warmup_ratio: float = 0.0,
@@ -302,6 +337,9 @@ class FineTuning:
         train_on_inputs: bool | Literal["auto"] | None = None,
         training_method: str = "sft",
         dpo_beta: float | None = None,
+        dpo_normalize_logratios_by_length: bool = False,
+        rpo_alpha: float | None = None,
+        simpo_gamma: float | None = None,
         from_checkpoint: str | None = None,
     ) -> FinetuneResponse:
         """
@@ -318,7 +356,7 @@ class FineTuning:
             batch_size (int or "max"): Batch size for fine-tuning. Defaults to max.
             learning_rate (float, optional): Learning rate multiplier to use for training
                 Defaults to 0.00001.
-            lr_scheduler_type (Literal["linear", "cosine"]): Learning rate scheduler type. Defaults to "linear".
+            lr_scheduler_type (Literal["linear", "cosine"]): Learning rate scheduler type. Defaults to "cosine".
             min_lr_ratio (float, optional): Min learning rate ratio of the initial learning rate for
                 the learning rate scheduler. Defaults to 0.0.
             scheduler_num_cycles (float, optional): Number or fraction of cycles for the cosine learning rate scheduler. Defaults to 0.5.
@@ -353,6 +391,9 @@ class FineTuning:
             training_method (str, optional): Training method. Defaults to "sft".
                 Supported methods: "sft", "dpo".
             dpo_beta (float, optional): DPO beta parameter. Defaults to None.
+            dpo_normalize_logratios_by_length (bool): Whether or not normalize logratios by sample length. Defaults to False,
+            rpo_alpha (float, optional): RPO alpha parameter of DPO training to include NLL in the loss. Defaults to None.
+            simpo_gamma: (float, optional): SimPO gamma parameter. Defaults to None.
             from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
                 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
                 The step value is optional, without it the final checkpoint will be used.
@@ -405,6 +446,9 @@ class FineTuning:
            train_on_inputs=train_on_inputs,
            training_method=training_method,
            dpo_beta=dpo_beta,
+            dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
+            rpo_alpha=rpo_alpha,
+            simpo_gamma=simpo_gamma,
            from_checkpoint=from_checkpoint,
        )
 
@@ -693,7 +737,7 @@ class AsyncFineTuning:
         n_checkpoints: int | None = 1,
         batch_size: int | Literal["max"] = "max",
         learning_rate: float | None = 0.00001,
-        lr_scheduler_type: Literal["linear", "cosine"] = "linear",
+        lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
         min_lr_ratio: float = 0.0,
         scheduler_num_cycles: float = 0.5,
         warmup_ratio: float = 0.0,
@@ -714,6 +758,9 @@ class AsyncFineTuning:
         train_on_inputs: bool | Literal["auto"] | None = None,
         training_method: str = "sft",
         dpo_beta: float | None = None,
+        dpo_normalize_logratios_by_length: bool = False,
+        rpo_alpha: float | None = None,
+        simpo_gamma: float | None = None,
         from_checkpoint: str | None = None,
     ) -> FinetuneResponse:
         """
@@ -730,7 +777,7 @@ class AsyncFineTuning:
             batch_size (int, optional): Batch size for fine-tuning. Defaults to max.
             learning_rate (float, optional): Learning rate multiplier to use for training
                 Defaults to 0.00001.
-            lr_scheduler_type (Literal["linear", "cosine"]): Learning rate scheduler type. Defaults to "linear".
+            lr_scheduler_type (Literal["linear", "cosine"]): Learning rate scheduler type. Defaults to "cosine".
             min_lr_ratio (float, optional): Min learning rate ratio of the initial learning rate for
                 the learning rate scheduler. Defaults to 0.0.
             scheduler_num_cycles (float, optional): Number or fraction of cycles for the cosine learning rate scheduler. Defaults to 0.5.
@@ -765,6 +812,9 @@ class AsyncFineTuning:
             training_method (str, optional): Training method. Defaults to "sft".
                 Supported methods: "sft", "dpo".
             dpo_beta (float, optional): DPO beta parameter. Defaults to None.
+            dpo_normalize_logratios_by_length (bool): Whether or not normalize logratios by sample length. Defaults to False,
+            rpo_alpha (float, optional): RPO alpha parameter of DPO training to include NLL in the loss. Defaults to None.
+            simpo_gamma: (float, optional): SimPO gamma parameter. Defaults to None.
             from_checkpoint (str, optional): The checkpoint identifier to continue training from a previous fine-tuning job.
                 The format: {$JOB_ID/$OUTPUT_MODEL_NAME}:{$STEP}.
                 The step value is optional, without it the final checkpoint will be used.
@@ -817,6 +867,9 @@ class AsyncFineTuning:
            train_on_inputs=train_on_inputs,
            training_method=training_method,
            dpo_beta=dpo_beta,
+            dpo_normalize_logratios_by_length=dpo_normalize_logratios_by_length,
+            rpo_alpha=rpo_alpha,
+            simpo_gamma=simpo_gamma,
            from_checkpoint=from_checkpoint,
        )
 
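Putting the new parameters together, a hedged example of starting a DPO job through the Python client (the model name and file id are placeholders, not values from this diff):

from together import Together

client = Together()
resp = client.fine_tuning.create(
    training_file="file-preference-pairs",  # placeholder uploaded file id
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",  # placeholder
    training_method="dpo",
    dpo_beta=0.1,
    dpo_normalize_logratios_by_length=True,
    rpo_alpha=0.5,     # adds an NLL term to the DPO loss
    simpo_gamma=None,  # a positive value here would switch to SimPO mode
)
print(resp.id)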
together/types/__init__.py CHANGED
@@ -52,6 +52,7 @@ from together.types.finetune import (
 from together.types.images import ImageRequest, ImageResponse
 from together.types.models import ModelObject
 from together.types.rerank import RerankRequest, RerankResponse
+from together.types.batch import BatchJob, BatchJobStatus, BatchEndpoint
 
 
 __all__ = [
@@ -104,4 +105,7 @@ __all__ = [
     "DedicatedEndpoint",
     "ListEndpoint",
     "Autoscaling",
+    "BatchJob",
+    "BatchJobStatus",
+    "BatchEndpoint",
 ]
together/types/batch.py ADDED
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+from enum import Enum
+from typing import Optional
+from datetime import datetime
+
+from pydantic import Field
+
+from together.types.abstract import BaseModel
+
+
+class BatchJobStatus(str, Enum):
+    """
+    The status of a batch job
+    """
+
+    VALIDATING = "VALIDATING"
+    IN_PROGRESS = "IN_PROGRESS"
+    COMPLETED = "COMPLETED"
+    FAILED = "FAILED"
+    EXPIRED = "EXPIRED"
+    CANCELLED = "CANCELLED"
+
+
+class BatchEndpoint(str, Enum):
+    """
+    The endpoint of a batch job
+    """
+
+    COMPLETIONS = "/v1/completions"
+    CHAT_COMPLETIONS = "/v1/chat/completions"
+    # More endpoints can be added here as needed
+
+
+class BatchJob(BaseModel):
+    """
+    A batch job object
+    """
+
+    id: str
+    user_id: str
+    input_file_id: str
+    file_size_bytes: int
+    status: BatchJobStatus
+    job_deadline: datetime
+    created_at: datetime
+    endpoint: str
+    progress: float = 0.0
+    model_id: Optional[str] = None
+    output_file_id: Optional[str] = None
+    error_file_id: Optional[str] = None
+    error: Optional[str] = None
+    completed_at: Optional[datetime] = None
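
Since BatchJob is a pydantic model, API payloads parse directly into it. A sketch with illustrative values (not a real API response):

from together.types import BatchJob, BatchJobStatus

job = BatchJob(
    id="batch-123",
    user_id="user-456",
    input_file_id="file-789",
    file_size_bytes=2048,
    status="IN_PROGRESS",                # coerced to BatchJobStatus.IN_PROGRESS
    job_deadline="2025-01-02T00:00:00Z", # ISO 8601 strings parse to datetime
    created_at="2025-01-01T00:00:00Z",
    endpoint="/v1/chat/completions",
)
assert job.status is BatchJobStatus.IN_PROGRESS
assert job.progress == 0.0  # default when the API omits it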
together/types/files.py CHANGED
@@ -13,6 +13,7 @@ from together.types.common import (
 
 class FilePurpose(str, Enum):
     FineTune = "fine-tune"
+    BatchAPI = "batch-api"
 
 
 class FileType(str, Enum):
together/types/finetune.py CHANGED
@@ -159,6 +159,10 @@ class TrainingMethodDPO(TrainingMethod):
 
     method: Literal["dpo"] = "dpo"
     dpo_beta: float | None = None
+    dpo_normalize_logratios_by_length: bool = False
+    dpo_reference_free: bool = False
+    rpo_alpha: float | None = None
+    simpo_gamma: float | None = None
 
 
 class FinetuneRequest(BaseModel):
together-1.5.14.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: together
-Version: 1.5.11
+Version: 1.5.14
 Summary: Python client for Together's Cloud Platform!
 License: Apache-2.0
 Author: Together AI
together-1.5.14.dist-info/RECORD CHANGED
@@ -7,12 +7,12 @@ together/cli/api/chat.py,sha256=2PHRb-9T-lUEKhUJFtc7SxJv3shCVx40gq_8pzfsewM,9234
 together/cli/api/completions.py,sha256=l-Zw5t7hojL3w8xd_mitS2NRB72i5Z0xwkzH0rT5XMc,4263
 together/cli/api/endpoints.py,sha256=f6KafWZvRF6n_ThWdr3y9uhE6wPF37PcD45w_EtgXmY,13289
 together/cli/api/files.py,sha256=QLYEXRkY8J2Gg1SbTCtzGfoTMvosoeACNK83L_oLubs,3397
-together/cli/api/finetune.py,sha256=y_FEWvoD7Mi07U2g-sCbPFN_qsBRf8T365xBEbpM2qc,16238
+together/cli/api/finetune.py,sha256=zrjxpPSgqcZRhJA4A_QjXNhNUfEu24zw0Da3UfUlzrY,17063
 together/cli/api/images.py,sha256=GADSeaNUHUVMtWovmccGuKc28IJ9E_v4vAEwYHJhu5o,2645
 together/cli/api/models.py,sha256=CXw8B1hqNkadogi58GIXhLg_dTJnvTBaE7Kq1_xQ-10,1423
 together/cli/api/utils.py,sha256=IuqYWPnLI38_Bqd7lj8V_SnGdYc59pRmMbQmciS4FsM,1326
 together/cli/cli.py,sha256=YCDzbXpC5is0rs2PEkUPrIhYuzdyrihQ8GVR_TlDv5s,2054
-together/client.py,sha256=lN_KfJs2gCdLfQ5GBrVKin-4ZL7L7UQf9wjofWQ7sXg,5682
+together/client.py,sha256=us5aE8hVzKmMCHZz52NcSPXByOsigd2sKbbyqe4x1m0,5861
 together/constants.py,sha256=UDJhEylJFmdm4bedBDpvqYXBj5Or3k7z9GWtkRY_dZQ,1526
 together/error.py,sha256=HU6247CyzCFjaxL9A0XYbXZ6fY_ebRg0FEYjI4Skogs,5515
 together/filemanager.py,sha256=lwNIYm-BAcnUPtyE0Q_8NpRNsxMlQrpIWFVUVJBBz88,11356
@@ -24,24 +24,26 @@ together/legacy/files.py,sha256=qmAqMiNTPWb6WvLV5Tsv6kxGRfQ31q7OkHZNFwkw8v0,4082
 together/legacy/finetune.py,sha256=XjZ4Dn2hSjMUVm64s6u1bbh9F7r9GbDKp-WLmzyEKRw,5123
 together/legacy/images.py,sha256=bJJRs-6C7-NexPyaeyHiYlHOU51yls5-QAiqtO4xrZU,626
 together/legacy/models.py,sha256=85ZN9Ids_FjdYNDRv5k7sgrtVWPKPHqkDplORtVUGHg,1087
-together/resources/__init__.py,sha256=OQ8tW9mUIX0Ezk0wvYEnnEym6wGsjBKgXFLU9Ffgb-o,984
+together/resources/__init__.py,sha256=jZ9O14K7wKb1U0bmIMosa9P3W3xPCJhoEJiaHw8anCc,1078
 together/resources/audio/__init__.py,sha256=e7xp0Lkp_nMAHXcuFHS7dLXP_YqTPMMZIilW1TW_sAI,551
 together/resources/audio/speech.py,sha256=81ib_gIo-Rxoaipx2Pi9ZsKnOTjeFPwSlBrcUkyX5xk,5211
+together/resources/batch.py,sha256=wSJR30CAFjzZ436vjYdXLCT0ahA5E0ud_qHMS5YvZ1M,3750
 together/resources/chat/__init__.py,sha256=RsTptdP8MeGjcdIjze896-J27cRvCbUoMft0X2BVlQ8,617
 together/resources/chat/completions.py,sha256=jYiNZsWa8RyEacL0VgxWj1egJ857oU4nxIY8uqGHcaU,14459
 together/resources/code_interpreter.py,sha256=vbN8Mh5MG6HQvqra7p61leIyfebgbgJTM_q2A_Fylhw,2948
 together/resources/completions.py,sha256=5Wa-ZjPCxRcam6CDe7KgGYlTA7yJZMmd5TrRgGCL_ug,11726
 together/resources/embeddings.py,sha256=PTvLb82yjG_-iQOyuhsilp77Fr7gZ0o6WD2KeRnKoxs,2675
 together/resources/endpoints.py,sha256=NNjp-wyzOotzlscGGrANhOHxQBjHTN8f5kTQTH_CLvE,17177
-together/resources/files.py,sha256=bnPbaF25e4InBRPvHwXHXT-oSX1Z1sZRsnQW5wq82U4,4990
-together/resources/finetune.py,sha256=FxIMIb1ZNjhrK7cE8bYhnwuRZSmLy_nK7HzMAp4XrNQ,37617
+together/resources/files.py,sha256=y3Ri6UtyAa7fjCJ8_fp26Y2hzzi6Aoo21JKkVgljFl8,5026
+together/resources/finetune.py,sha256=1O8JIbtLDY32N6hL88jUQVDEGcXFnl9qJAEREFoEK5k,40407
 together/resources/images.py,sha256=LQUjKPaFxWTqOAPnyF1Pp7Rz4NLOYhmoKwshpYiprEM,4923
 together/resources/models.py,sha256=qgmAXv61Cq4oLxytenEZBywA8shldDHYxJ_EAu_4JWQ,3864
 together/resources/rerank.py,sha256=3Ju_aRSyZ1s_3zCSNZnSnEJErUVmt2xa3M8z1nvejMA,3931
 together/together_response.py,sha256=a3dgKMPDrlfKQwxYENfNt2T4l2vSZxRWMixhHSy-q3E,1308
-together/types/__init__.py,sha256=VgIbE2AOK9c2TQUzkbRbyRkdia2COXJXl_wxPaoxR-M,2688
+together/types/__init__.py,sha256=_93XstLg1OOWratj_N1bsNN-2aS628uHH3SZj0wszyc,2820
 together/types/abstract.py,sha256=1lFQI_3WjsR_t1128AeKW0aTk6EiM6Gh1J3ZuyLLPao,642
 together/types/audio_speech.py,sha256=jlj8BZf3dkIDARF1P11fuenVLj4try8Yx4RN-EAkhOU,2609
+together/types/batch.py,sha256=FP0RuQ3EDy-FV1bh-biPICvyRS7WqLm38GHz5lzKyXM,1112
 together/types/chat_completions.py,sha256=qpBCMXEWtRwW_fmiu6cecm3d4h6mcK8gvr-8JkbAopQ,5104
 together/types/code_interpreter.py,sha256=cjF8TKgRkJllHS4i24dWQZBGTRsG557eHSewOiip0Kk,1770
 together/types/common.py,sha256=kxZ-N9xtBsGYZBmbIWnZ0rfT3Pn8PFB7sAbp3iv96pw,1525
@@ -49,8 +51,8 @@ together/types/completions.py,sha256=o3FR5ixsTUj-a3pmOUzbSQg-hESVhpqrC9UD__VCqr4
 together/types/embeddings.py,sha256=J7grkYYn7xhqeKaBO2T-8XQRtHhkzYzymovtGdIUK5A,751
 together/types/endpoints.py,sha256=EzNhHOoQ_D9fUdNQtxQPeSWiFzdFLqpNodN0YLmv_h0,4393
 together/types/error.py,sha256=OVlCs3cx_2WhZK4JzHT8SQyRIIqKOP1AZQ4y1PydjAE,370
-together/types/files.py,sha256=-rEUfsV6f2vZB9NrFxT4_933ubsDIUNkPB-3OlOFk4A,1954
-together/types/finetune.py,sha256=Utdcm3kL_cDfBS3zjXwyHsuP2qFFjCQiQZOsPD-WlpE,10918
+together/types/files.py,sha256=i-Ke57p8Svb1MbMZxu-Fo2zxIc6j-mDO2TLGNwPpGu0,1981
+together/types/finetune.py,sha256=6_jXgVVp4OOQXkABh0HKBzGy47H3wYCG2QxtXbdYauw,11079
 together/types/images.py,sha256=xnC-FZGdZU30WSFTybfGneWxb-kj0ZGufJsgHtB8j0k,980
 together/types/models.py,sha256=nwQIZGHKZpX9I6mK8z56VW70YC6Ry6JGsVa0s99QVxc,1055
 together/types/rerank.py,sha256=qZfuXOn7MZ6ly8hpJ_MZ7OU_Bi1-cgYNSB20Wja8Qkk,1061
@@ -60,8 +62,8 @@ together/utils/api_helpers.py,sha256=2K0O6qeEQ2zVFvi5NBN5m2kjZJaS3-JfKFecQ7SmGaw
 together/utils/files.py,sha256=btWQawwXbNKfPmCtRyObZViG1Xx-IPz45PrAtMXvcy8,16741
 together/utils/tools.py,sha256=H2MTJhEqtBllaDvOyZehIO_IVNK3P17rSDeILtJIVag,2964
 together/version.py,sha256=p03ivHyE0SyWU4jAnRTBi_sOwywVWoZPU4g2gzRgG-Y,126
-together-1.5.11.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-together-1.5.11.dist-info/METADATA,sha256=JhFDu1VPl18jD4YzuGcq4d6rp1ZNXNAf8k2qkuAjjB0,15497
-together-1.5.11.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-together-1.5.11.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
-together-1.5.11.dist-info/RECORD,,
+together-1.5.14.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+together-1.5.14.dist-info/METADATA,sha256=5fJlYeJKCtS-wVbWPtI_CDWKVSpMDvF3t-DmC8qxZ2U,15497
+together-1.5.14.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+together-1.5.14.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
+together-1.5.14.dist-info/RECORD,,