together 1.2.13__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
together/cli/api/finetune.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  import json
  from datetime import datetime
  from textwrap import wrap
+ from typing import Any, Literal

  import click
  from click.core import ParameterSource # type: ignore[attr-defined]
@@ -10,8 +11,9 @@ from rich import print as rprint
  from tabulate import tabulate

  from together import Together
- from together.types.finetune import DownloadCheckpointType
+ from together.cli.api.utils import INT_WITH_MAX
  from together.utils import finetune_price_to_dollars, log_warn, parse_timestamp
+ from together.types.finetune import DownloadCheckpointType, FinetuneTrainingLimits


  _CONFIRMATION_MESSAGE = (
@@ -56,7 +58,7 @@ def fine_tuning(ctx: click.Context) -> None:
  @click.option(
      "--n-checkpoints", type=int, default=1, help="Number of checkpoints to save"
  )
- @click.option("--batch-size", type=int, default=16, help="Train batch size")
+ @click.option("--batch-size", type=INT_WITH_MAX, default="max", help="Train batch size")
  @click.option("--learning-rate", type=float, default=1e-5, help="Learning rate")
  @click.option(
      "--lora/--no-lora",
@@ -93,7 +95,7 @@ def create(
      n_epochs: int,
      n_evals: int,
      n_checkpoints: int,
-     batch_size: int,
+     batch_size: int | Literal["max"],
      learning_rate: float,
      lora: bool,
      lora_r: int,
@@ -107,20 +109,64 @@ def create(
      """Start fine-tuning"""
      client: Together = ctx.obj

+     training_args: dict[str, Any] = dict(
+         training_file=training_file,
+         model=model,
+         n_epochs=n_epochs,
+         validation_file=validation_file,
+         n_evals=n_evals,
+         n_checkpoints=n_checkpoints,
+         batch_size=batch_size,
+         learning_rate=learning_rate,
+         lora=lora,
+         lora_r=lora_r,
+         lora_dropout=lora_dropout,
+         lora_alpha=lora_alpha,
+         lora_trainable_modules=lora_trainable_modules,
+         suffix=suffix,
+         wandb_api_key=wandb_api_key,
+     )
+
+     model_limits: FinetuneTrainingLimits = client.fine_tuning.get_model_limits(
+         model=model
+     )
+
      if lora:
-         learning_rate_source = click.get_current_context().get_parameter_source( # type: ignore[attr-defined]
-             "learning_rate"
-         )
-         if learning_rate_source == ParameterSource.DEFAULT:
-             learning_rate = 1e-3
+         if model_limits.lora_training is None:
+             raise click.BadParameter(
+                 f"LoRA fine-tuning is not supported for the model `{model}`"
+             )
+
+         default_values = {
+             "lora_r": model_limits.lora_training.max_rank,
+             "batch_size": model_limits.lora_training.max_batch_size,
+             "learning_rate": 1e-3,
+         }
+         for arg in default_values:
+             arg_source = ctx.get_parameter_source("arg") # type: ignore[attr-defined]
+             if arg_source == ParameterSource.DEFAULT:
+                 training_args[arg] = default_values[arg_source]
+
+         if ctx.get_parameter_source("lora_alpha") == ParameterSource.DEFAULT: # type: ignore[attr-defined]
+             training_args["lora_alpha"] = training_args["lora_r"] * 2
      else:
+         if model_limits.full_training is None:
+             raise click.BadParameter(
+                 f"Full fine-tuning is not supported for the model `{model}`"
+             )
+
          for param in ["lora_r", "lora_dropout", "lora_alpha", "lora_trainable_modules"]:
-             param_source = click.get_current_context().get_parameter_source(param) # type: ignore[attr-defined]
+             param_source = ctx.get_parameter_source(param) # type: ignore[attr-defined]
              if param_source != ParameterSource.DEFAULT:
                  raise click.BadParameter(
                      f"You set LoRA parameter `{param}` for a full fine-tuning job. "
                      f"Please change the job type with --lora or remove `{param}` from the arguments"
                  )
+
+         batch_size_source = ctx.get_parameter_source("batch_size") # type: ignore[attr-defined]
+         if batch_size_source == ParameterSource.DEFAULT:
+             training_args["batch_size"] = model_limits.full_training.max_batch_size
+
      if n_evals <= 0 and validation_file:
          log_warn(
              "Warning: You have specified a validation file but the number of evaluation loops is set to 0. No evaluations will be performed."
together/cli/api/images.py CHANGED
@@ -1,5 +1,6 @@
  import base64
  import pathlib
+ import requests

  import click
  from PIL import Image
@@ -70,8 +71,18 @@ def generate(
      for i, choice in enumerate(response.data):
          assert isinstance(choice, ImageChoicesData)

+         data = None
+         if choice.b64_json:
+             data = base64.b64decode(choice.b64_json)
+         elif choice.url:
+             data = requests.get(choice.url).content
+
+         if not data:
+             click.echo(f"Image [{i + 1}/{len(response.data)}] is empty")
+             continue
+
          with open(f"{output}/{prefix}{choice.index}.png", "wb") as f:
-             f.write(base64.b64decode(choice.b64_json))
+             f.write(data)

          click.echo(
              f"Image [{i + 1}/{len(response.data)}] saved to {output}/{prefix}{choice.index}.png"
together/cli/api/utils.py ADDED
@@ -0,0 +1,21 @@
+ import click
+
+ from typing import Literal
+
+
+ class AutoIntParamType(click.ParamType):
+     name = "integer"
+
+     def convert(
+         self, value: str, param: click.Parameter | None, ctx: click.Context | None
+     ) -> int | Literal["max"] | None:
+         if isinstance(value, int):
+             return value
+
+         if value == "max":
+             return "max"
+
+         self.fail("Invalid integer value: {value}")
+
+
+ INT_WITH_MAX = AutoIntParamType()
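A short usage sketch for the new parameter type (the `show` command below is hypothetical, not part of the package): `INT_WITH_MAX` lets an option carry either an integer or the sentinel string `"max"`, which the fine-tuning create command later resolves against the model's limits.

```python
import click

from together.cli.api.utils import INT_WITH_MAX


@click.command()
@click.option("--batch-size", type=INT_WITH_MAX, default="max", help="Train batch size")
def show(batch_size):
    # With the default left untouched, batch_size arrives as the sentinel string "max".
    click.echo(repr(batch_size))


if __name__ == "__main__":
    show()
```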
together/legacy/finetune.py CHANGED
@@ -1,7 +1,7 @@
  from __future__ import annotations

  import warnings
- from typing import Any, Dict, List
+ from typing import Any, Dict, List, Literal

  import together
  from together.legacy.base import API_KEY_WARNING, deprecated
@@ -43,7 +43,7 @@ class Finetune:
              model=model,
              n_epochs=n_epochs,
              n_checkpoints=n_checkpoints,
-             batch_size=batch_size,
+             batch_size=batch_size if isinstance(batch_size, int) else "max",
              learning_rate=learning_rate,
              suffix=suffix,
              wandb_api_key=wandb_api_key,
together/resources/finetune.py CHANGED
@@ -1,6 +1,7 @@
  from __future__ import annotations

  from pathlib import Path
+ from typing import Literal

  from rich import print as rprint

@@ -13,6 +14,7 @@ from together.types import (
      FinetuneListEvents,
      FinetuneRequest,
      FinetuneResponse,
+     FinetuneTrainingLimits,
      FullTrainingType,
      LoRATrainingType,
      TogetherClient,
@@ -20,7 +22,7 @@ from together.types import (
      TrainingType,
  )
  from together.types.finetune import DownloadCheckpointType
- from together.utils import log_warn, normalize_key
+ from together.utils import log_warn_once, normalize_key


  class FineTuning:
@@ -36,16 +38,17 @@ class FineTuning:
          validation_file: str | None = "",
          n_evals: int | None = 0,
          n_checkpoints: int | None = 1,
-         batch_size: int | None = 16,
+         batch_size: int | Literal["max"] = "max",
          learning_rate: float | None = 0.00001,
          lora: bool = False,
-         lora_r: int | None = 8,
+         lora_r: int | None = None,
          lora_dropout: float | None = 0,
-         lora_alpha: float | None = 8,
+         lora_alpha: float | None = None,
          lora_trainable_modules: str | None = "all-linear",
          suffix: str | None = None,
          wandb_api_key: str | None = None,
          verbose: bool = False,
+         model_limits: FinetuneTrainingLimits | None = None,
      ) -> FinetuneResponse:
          """
          Method to initiate a fine-tuning job
@@ -58,7 +61,7 @@ class FineTuning:
              n_evals (int, optional): Number of evaluation loops to run. Defaults to 0.
              n_checkpoints (int, optional): Number of checkpoints to save during fine-tuning.
                  Defaults to 1.
-             batch_size (int, optional): Batch size for fine-tuning. Defaults to 32.
+             batch_size (int, optional): Batch size for fine-tuning. Defaults to max.
              learning_rate (float, optional): Learning rate multiplier to use for training
                  Defaults to 0.00001.
              lora (bool, optional): Whether to use LoRA adapters. Defaults to True.
@@ -72,17 +75,36 @@ class FineTuning:
                  Defaults to None.
              verbose (bool, optional): whether to print the job parameters before submitting a request.
                  Defaults to False.
+             model_limits (FinetuneTrainingLimits, optional): Limits for the hyperparameters the model in Fine-tuning.
+                 Defaults to None.

          Returns:
              FinetuneResponse: Object containing information about fine-tuning job.
          """

+         if batch_size == "max":
+             log_warn_once(
+                 "Starting from together>=1.3.0, "
+                 "the default batch size is set to the maximum allowed value for each model."
+             )
+
          requestor = api_requestor.APIRequestor(
              client=self._client,
          )

+         if model_limits is None:
+             model_limits = self.get_model_limits(model=model)
+
          training_type: TrainingType = FullTrainingType()
          if lora:
+             if model_limits.lora_training is None:
+                 raise ValueError(
+                     "LoRA adapters are not supported for the selected model."
+                 )
+             lora_r = (
+                 lora_r if lora_r is not None else model_limits.lora_training.max_rank
+             )
+             lora_alpha = lora_alpha if lora_alpha is not None else lora_r * 2
              training_type = LoRATrainingType(
                  lora_r=lora_r,
                  lora_alpha=lora_alpha,
@@ -90,6 +112,22 @@ class FineTuning:
                  lora_trainable_modules=lora_trainable_modules,
              )

+             batch_size = (
+                 batch_size
+                 if batch_size != "max"
+                 else model_limits.lora_training.max_batch_size
+             )
+         else:
+             if model_limits.full_training is None:
+                 raise ValueError(
+                     "Full training is not supported for the selected model."
+                 )
+             batch_size = (
+                 batch_size
+                 if batch_size != "max"
+                 else model_limits.full_training.max_batch_size
+             )
+
          finetune_request = FinetuneRequest(
              model=model,
              training_file=training_file,
@@ -121,12 +159,6 @@ class FineTuning:
 
          assert isinstance(response, TogetherResponse)

-         # TODO: Remove after next LoRA default change
-         log_warn(
-             "Some of the jobs run _directly_ from the together-python library might be trained using LoRA adapters. "
-             "The version range when this change occurred is from 1.2.3 to 1.2.6."
-         )
-
          return FinetuneResponse(**response.data)

      def list(self) -> FinetuneList:
@@ -305,6 +337,34 @@ class FineTuning:
              size=file_size,
          )

+     def get_model_limits(self, *, model: str) -> FinetuneTrainingLimits:
+         """
+         Requests training limits for a specific model
+
+         Args:
+             model_name (str): Name of the model to get limits for
+
+         Returns:
+             FinetuneTrainingLimits: Object containing training limits for the model
+         """
+
+         requestor = api_requestor.APIRequestor(
+             client=self._client,
+         )
+
+         model_limits_response, _, _ = requestor.request(
+             options=TogetherRequest(
+                 method="GET",
+                 url="fine-tunes/models/limits",
+                 params={"model_name": model},
+             ),
+             stream=False,
+         )
+
+         model_limits = FinetuneTrainingLimits(**model_limits_response.data)
+
+         return model_limits
+

  class AsyncFineTuning:
      def __init__(self, client: TogetherClient) -> None:
@@ -493,3 +553,31 @@ class AsyncFineTuning:
              "AsyncFineTuning.download not implemented. "
              "Please use FineTuning.download function instead."
          )
+
+     async def get_model_limits(self, *, model: str) -> FinetuneTrainingLimits:
+         """
+         Requests training limits for a specific model
+
+         Args:
+             model_name (str): Name of the model to get limits for
+
+         Returns:
+             FinetuneTrainingLimits: Object containing training limits for the model
+         """
+
+         requestor = api_requestor.APIRequestor(
+             client=self._client,
+         )
+
+         model_limits_response, _, _ = await requestor.arequest(
+             options=TogetherRequest(
+                 method="GET",
+                 url="fine-tunes/models/limits",
+                 params={"model": model},
+             ),
+             stream=False,
+         )
+
+         model_limits = FinetuneTrainingLimits(**model_limits_response.data)
+
+         return model_limits
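Taken together, the new `get_model_limits` wrapper and the `model_limits` argument to `create()` let callers fetch the limits once and reuse them. A hedged usage sketch (placeholder model and file IDs, requires `TOGETHER_API_KEY` in the environment):

```python
from together import Together

client = Together()  # reads TOGETHER_API_KEY from the environment

# New in 1.3.x: query the per-model training limits.
limits = client.fine_tuning.get_model_limits(
    model="mistralai/Mixtral-8x7B-Instruct-v0.1"
)
print(limits.max_num_epochs, limits.full_training, limits.lora_training)

# Passing the limits back in skips the extra lookup inside create();
# batch_size="max" is resolved against them.
job = client.fine_tuning.create(
    training_file="file-xxxxxxxxxxxxxxxx",  # placeholder file ID
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",
    lora=True,
    batch_size="max",
    model_limits=limits,
)
print(job)
```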
together/types/__init__.py CHANGED
@@ -29,6 +29,7 @@ from together.types.finetune import (
      FullTrainingType,
      LoRATrainingType,
      TrainingType,
+     FinetuneTrainingLimits,
  )
  from together.types.images import (
      ImageRequest,
@@ -71,4 +72,5 @@ __all__ = [
      "LoRATrainingType",
      "RerankRequest",
      "RerankResponse",
+     "FinetuneTrainingLimits",
  ]
together/types/finetune.py CHANGED
@@ -263,3 +263,21 @@ class FinetuneDownloadResult(BaseModel):
      filename: str | None = None
      # size in bytes
      size: int | None = None
+
+
+ class FinetuneFullTrainingLimits(BaseModel):
+     max_batch_size: int
+     min_batch_size: int
+
+
+ class FinetuneLoraTrainingLimits(FinetuneFullTrainingLimits):
+     max_rank: int
+     target_modules: List[str]
+
+
+ class FinetuneTrainingLimits(BaseModel):
+     max_num_epochs: int
+     max_learning_rate: float
+     min_learning_rate: float
+     full_training: FinetuneFullTrainingLimits | None = None
+     lora_training: FinetuneLoraTrainingLimits | None = None
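For reference, a sketch of what a parsed limits object looks like under this schema; the numbers and module names below are invented for illustration:

```python
from together.types.finetune import (
    FinetuneFullTrainingLimits,
    FinetuneLoraTrainingLimits,
    FinetuneTrainingLimits,
)

limits = FinetuneTrainingLimits(
    max_num_epochs=20,  # illustrative values only
    max_learning_rate=1e-3,
    min_learning_rate=1e-6,
    full_training=FinetuneFullTrainingLimits(max_batch_size=96, min_batch_size=8),
    lora_training=FinetuneLoraTrainingLimits(
        max_batch_size=128,
        min_batch_size=8,
        max_rank=64,
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    ),
)

if limits.lora_training is not None:
    print(limits.lora_training.max_rank)  # 64
```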
together/types/images.py CHANGED
@@ -28,7 +28,9 @@ class ImageChoicesData(BaseModel):
      # response index
      index: int
      # base64 image response
-     b64_json: str
+     b64_json: str | None = None
+     # URL hosting image
+     url: str | None = None


  class ImageResponse(BaseModel):
together/utils/__init__.py CHANGED
@@ -1,4 +1,4 @@
- from together.utils._log import log_debug, log_info, log_warn, logfmt
+ from together.utils._log import log_debug, log_info, log_warn, log_warn_once, logfmt
  from together.utils.api_helpers import default_api_key, get_headers
  from together.utils.files import check_file
  from together.utils.tools import (
@@ -18,6 +18,7 @@ __all__ = [
      "log_debug",
      "log_info",
      "log_warn",
+     "log_warn_once",
      "logfmt",
      "enforce_trailing_slash",
      "normalize_key",
together/utils/_log.py CHANGED
@@ -13,6 +13,8 @@ logger = logging.getLogger("together")
 
  TOGETHER_LOG = os.environ.get("TOGETHER_LOG")

+ WARNING_MESSAGES_ONCE = set()
+

  def _console_log_level() -> str | None:
      if together.log in ["debug", "info"]:
@@ -59,3 +61,11 @@ def log_warn(message: str | Any, **params: Any) -> None:
      msg = logfmt(dict(message=message, **params))
      print(msg, file=sys.stderr)
      logger.warn(msg)
+
+
+ def log_warn_once(message: str | Any, **params: Any) -> None:
+     msg = logfmt(dict(message=message, **params))
+     if msg not in WARNING_MESSAGES_ONCE:
+         print(msg, file=sys.stderr)
+         logger.warn(msg)
+         WARNING_MESSAGES_ONCE.add(msg)
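A tiny sketch of the de-duplication this adds: repeated calls with an identical formatted message are emitted only once per process.

```python
from together.utils import log_warn_once

for _ in range(3):
    log_warn_once("the default batch size is now the model maximum")
# Printed to stderr and logged on the first call only; identical follow-up
# messages are suppressed via the module-level WARNING_MESSAGES_ONCE set.
```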
together-1.3.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: together
- Version: 1.2.13
+ Version: 1.3.1
  Summary: Python client for Together's Cloud Platform!
  Home-page: https://github.com/togethercomputer/together-python
  License: Apache-2.0
@@ -277,7 +277,7 @@ client.fine_tuning.create(
    model = 'mistralai/Mixtral-8x7B-Instruct-v0.1',
    n_epochs = 3,
    n_checkpoints = 1,
-   batch_size = 4,
+   batch_size = "max",
    learning_rate = 1e-5,
    suffix = 'my-demo-finetune',
    wandb_api_key = '1a2b3c4d5e.......',
together-1.3.1.dist-info/RECORD CHANGED
@@ -6,9 +6,10 @@ together/cli/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
  together/cli/api/chat.py,sha256=2PHRb-9T-lUEKhUJFtc7SxJv3shCVx40gq_8pzfsewM,9234
  together/cli/api/completions.py,sha256=l-Zw5t7hojL3w8xd_mitS2NRB72i5Z0xwkzH0rT5XMc,4263
  together/cli/api/files.py,sha256=QLYEXRkY8J2Gg1SbTCtzGfoTMvosoeACNK83L_oLubs,3397
- together/cli/api/finetune.py,sha256=21I_m4hIjSj5vRCNejQUzry3R-qzwd89W_LyIh1Q7Ts,10086
- together/cli/api/images.py,sha256=01dFYa2sK1HqUwVCD9FlwcjqkYWLoNxFZkzok13EriE,2363
+ together/cli/api/finetune.py,sha256=1oZo2zJGMcUHpnEJuCYsY_w4MAYjUuVG9coCvmZJv74,11801
+ together/cli/api/images.py,sha256=GADSeaNUHUVMtWovmccGuKc28IJ9E_v4vAEwYHJhu5o,2645
  together/cli/api/models.py,sha256=xWEzu8ZpxM_Pz9KEjRPRVuv_v22RayYZ4QcgiezT5tE,1126
+ together/cli/api/utils.py,sha256=IoMIilvBOE5zdq6OgCU2Qa9p_CbrsXCR7b9q3MWu9S0,446
  together/cli/cli.py,sha256=RC0tgapkSOFjsRPg8p-8dx9D2LDzm8YmVCHUjk_aVyQ,1977
  together/client.py,sha256=mOlIFjjE9eSTb0o_weaKJwm8qvWNKHDiMmp8kQ7y68I,4946
  together/constants.py,sha256=6DAvMTrGYI73gUFRbfBdLfDxksucpKjKsiH07PGtSSM,906
@@ -19,7 +20,7 @@ together/legacy/base.py,sha256=ehrX1SCfRbK5OA83wL1q7-tfF-yuZOUxzjxYfFtdvvQ,727
  together/legacy/complete.py,sha256=NRJX-vjnkg4HrgDo9LS3jFfhwfXpeGxcl24dcrLPK3A,2439
  together/legacy/embeddings.py,sha256=nyTERjyPLTm7Sc987a9FJt1adnW7gIa7xs2CwXLE9EI,635
  together/legacy/files.py,sha256=qmAqMiNTPWb6WvLV5Tsv6kxGRfQ31q7OkHZNFwkw8v0,4082
- together/legacy/finetune.py,sha256=k-lERbZLEZlW1QQ9A9zhhwl5KIPjf_jT0R0LSiLbD2Y,5063
+ together/legacy/finetune.py,sha256=LENaqegeb1PszXDbAhTNPro7T3isz6X_IICIOKH7dKE,5114
  together/legacy/images.py,sha256=bJJRs-6C7-NexPyaeyHiYlHOU51yls5-QAiqtO4xrZU,626
  together/legacy/models.py,sha256=85ZN9Ids_FjdYNDRv5k7sgrtVWPKPHqkDplORtVUGHg,1087
  together/resources/__init__.py,sha256=7BLdBCNUbgi5mz30EFfdkdIYiGfFCkiUbdNzMY1-igY,792
@@ -28,12 +29,12 @@ together/resources/chat/completions.py,sha256=jYiNZsWa8RyEacL0VgxWj1egJ857oU4nxI
  together/resources/completions.py,sha256=5Wa-ZjPCxRcam6CDe7KgGYlTA7yJZMmd5TrRgGCL_ug,11726
  together/resources/embeddings.py,sha256=PTvLb82yjG_-iQOyuhsilp77Fr7gZ0o6WD2KeRnKoxs,2675
  together/resources/files.py,sha256=bnPbaF25e4InBRPvHwXHXT-oSX1Z1sZRsnQW5wq82U4,4990
- together/resources/finetune.py,sha256=rSj5keJdouxDSMyEQ64WlSG-tkc0YY2jcB-oD9SjDPQ,16194
+ together/resources/finetune.py,sha256=c0nkOepUUjlwnvl8txWsMedVI4Q3-HcfquPBfP12Qkw,19047
  together/resources/images.py,sha256=LQUjKPaFxWTqOAPnyF1Pp7Rz4NLOYhmoKwshpYiprEM,4923
  together/resources/models.py,sha256=2dtHhXAqTDOOpwSbYLzWcKTC0-m2Szlb7LDYvp7Jr4w,1786
  together/resources/rerank.py,sha256=3Ju_aRSyZ1s_3zCSNZnSnEJErUVmt2xa3M8z1nvejMA,3931
  together/together_response.py,sha256=MhczUCPem93cjX-A1TOAUrRj3sO-o3SLcEcTsZgVzQI,1319
- together/types/__init__.py,sha256=ghMiyyR2UzY-Io9Ck3ocwmS6_XSO9VaYWwbLqPDSZfo,1681
+ together/types/__init__.py,sha256=oHZCMC0H3j1ykf7ZRgxIU0QBA534EMpfKqRaa9SdgOo,1739
  together/types/abstract.py,sha256=1lFQI_3WjsR_t1128AeKW0aTk6EiM6Gh1J3ZuyLLPao,642
  together/types/chat_completions.py,sha256=d24F3VfT7uVnmaEk7Fn-O7qkGUg_AQQzR7vPwlXVDXw,4882
  together/types/common.py,sha256=4ZeIgqGioqhIC-nNxY90czNPp-kAqboMulw6-1z6ShM,1511
@@ -41,18 +42,18 @@ together/types/completions.py,sha256=o3FR5ixsTUj-a3pmOUzbSQg-hESVhpqrC9UD__VCqr4
  together/types/embeddings.py,sha256=J7grkYYn7xhqeKaBO2T-8XQRtHhkzYzymovtGdIUK5A,751
  together/types/error.py,sha256=OVlCs3cx_2WhZK4JzHT8SQyRIIqKOP1AZQ4y1PydjAE,370
  together/types/files.py,sha256=-rEUfsV6f2vZB9NrFxT4_933ubsDIUNkPB-3OlOFk4A,1954
- together/types/finetune.py,sha256=UqZH98L3vVxZ6vykE5rmZFdpYQrqrkTvotZIiyqAME0,7362
- together/types/images.py,sha256=zX4Vt38tFDKU6yGb_hBY_N5eSTn3KPdpP5Ce_qnRHXQ,915
+ together/types/finetune.py,sha256=8oYZPNgMWw6xGT3kRzAF4LlWjSfVti4uDVi6FoVJDM0,7814
+ together/types/images.py,sha256=xnC-FZGdZU30WSFTybfGneWxb-kj0ZGufJsgHtB8j0k,980
  together/types/models.py,sha256=K9Om3cCFexy7qzRSEXUj7gpCy1CVb1hHx7MGG-hvTLw,1035
  together/types/rerank.py,sha256=qZfuXOn7MZ6ly8hpJ_MZ7OU_Bi1-cgYNSB20Wja8Qkk,1061
- together/utils/__init__.py,sha256=VpjeRTya1m5eEE-Qe1zYTFsNAvuEA-dy7M2eG9Xu4fc,662
- together/utils/_log.py,sha256=yzdOV6iBEsyqF8UVvKhZm-ATtRokm34V-dXjTv3WKdE,1665
+ together/utils/__init__.py,sha256=n1kmLiaExT9YOKT5ye--dC4tW2qcHeicKX0GR86U640,698
+ together/utils/_log.py,sha256=5IYNI-jYzxyIS-pUvhb0vE_Muo3MA7GgBhsu66TKP2w,1951
  together/utils/api_helpers.py,sha256=RSF7SRhbjHzroMOSWAXscflByM1r1ta_1SpxkAT22iE,2407
  together/utils/files.py,sha256=gMLthqfP5hKxVAerHMdy7gLXzdfY6lyOXdpW24Y4X3I,7165
  together/utils/tools.py,sha256=3-lXWP3cBCzOVSZg9tr5zOT1jaVeKAKVWxO2fcXZTh8,1788
  together/version.py,sha256=p03ivHyE0SyWU4jAnRTBi_sOwywVWoZPU4g2gzRgG-Y,126
- together-1.2.13.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- together-1.2.13.dist-info/METADATA,sha256=RP0IebBK1ge06MLZ1Ixh_AZyIK6jT4yczMZvfhzxeJE,11852
- together-1.2.13.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- together-1.2.13.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
- together-1.2.13.dist-info/RECORD,,
+ together-1.3.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ together-1.3.1.dist-info/METADATA,sha256=c5f1aaAoXmHuT21Pf2Z2LTCNlun8gEoinVBe4n0nER8,11855
+ together-1.3.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ together-1.3.1.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
+ together-1.3.1.dist-info/RECORD,,