together 1.2.11__py3-none-any.whl → 1.2.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,15 +1,27 @@
  from __future__ import annotations
 
  import json
+ from datetime import datetime
  from textwrap import wrap
 
  import click
  from click.core import ParameterSource  # type: ignore[attr-defined]
+ from rich import print as rprint
  from tabulate import tabulate
 
  from together import Together
- from together.utils import finetune_price_to_dollars, log_warn, parse_timestamp
  from together.types.finetune import DownloadCheckpointType
+ from together.utils import finetune_price_to_dollars, log_warn, parse_timestamp
+
+
+ _CONFIRMATION_MESSAGE = (
+     "You are about to create a fine-tuning job. "
+     "The cost of your job will be determined by the model size, the number of tokens "
+     "in the training file, the number of tokens in the validation file, the number of epochs, and "
+     "the number of evaluations. Visit https://www.together.ai/pricing to get a price estimate.\n"
+     "You can pass `-y` or `--confirm` to your command to skip this message.\n\n"
+     "Do you want to proceed?"
+ )
 
 
  class DownloadCheckpointTypeChoice(click.Choice):
@@ -65,6 +77,14 @@ def fine_tuning(ctx: click.Context) -> None:
      "--suffix", type=str, default=None, help="Suffix for the fine-tuned model name"
  )
  @click.option("--wandb-api-key", type=str, default=None, help="Wandb API key")
+ @click.option(
+     "--confirm",
+     "-y",
+     type=bool,
+     is_flag=True,
+     default=False,
+     help="Whether to skip the launch confirmation message",
+ )
  def create(
      ctx: click.Context,
      training_file: str,
@@ -82,6 +102,7 @@ def create(
      lora_trainable_modules: str,
      suffix: str,
      wandb_api_key: str,
+     confirm: bool,
  ) -> None:
      """Start fine-tuning"""
      client: Together = ctx.obj
@@ -109,30 +130,37 @@ def create(
              "You have specified a number of evaluation loops but no validation file."
          )
 
-     response = client.fine_tuning.create(
-         training_file=training_file,
-         model=model,
-         n_epochs=n_epochs,
-         validation_file=validation_file,
-         n_evals=n_evals,
-         n_checkpoints=n_checkpoints,
-         batch_size=batch_size,
-         learning_rate=learning_rate,
-         lora=lora,
-         lora_r=lora_r,
-         lora_dropout=lora_dropout,
-         lora_alpha=lora_alpha,
-         lora_trainable_modules=lora_trainable_modules,
-         suffix=suffix,
-         wandb_api_key=wandb_api_key,
-     )
-
-     click.echo(json.dumps(response.model_dump(exclude_none=True), indent=4))
+     if confirm or click.confirm(_CONFIRMATION_MESSAGE, default=True, show_default=True):
+         response = client.fine_tuning.create(
+             training_file=training_file,
+             model=model,
+             n_epochs=n_epochs,
+             validation_file=validation_file,
+             n_evals=n_evals,
+             n_checkpoints=n_checkpoints,
+             batch_size=batch_size,
+             learning_rate=learning_rate,
+             lora=lora,
+             lora_r=lora_r,
+             lora_dropout=lora_dropout,
+             lora_alpha=lora_alpha,
+             lora_trainable_modules=lora_trainable_modules,
+             suffix=suffix,
+             wandb_api_key=wandb_api_key,
+             verbose=True,
+         )
 
-     # TODO: Remove it after the 21st of August
-     log_warn(
-         "The default value of batch size has been changed from 32 to 16 since together version >= 1.2.6"
-     )
+         report_string = f"Successfully submitted a fine-tuning job {response.id}"
+         if response.created_at is not None:
+             created_time = datetime.strptime(
+                 response.created_at, "%Y-%m-%dT%H:%M:%S.%f%z"
+             )
+             # created_at reports UTC time, we use .astimezone() to convert to local time
+             formatted_time = created_time.astimezone().strftime("%m/%d/%Y, %H:%M:%S")
+             report_string += f" at {formatted_time}"
+         rprint(report_string)
+     else:
+         click.echo("No confirmation received, stopping job launch")
 
 
  @fine_tuning.command()
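
Note: with the options added above, an invocation such as `together fine-tuning create --training-file <file-id> --model <base-model> -y` (illustrative; the argument values are placeholders) skips the interactive prompt. The same command without `-y`/`--confirm` displays _CONFIRMATION_MESSAGE and stops the launch if the prompt is declined.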
together/constants.py CHANGED
@@ -19,7 +19,7 @@ Please set it as an environment variable or set it as together.api_key
  Find your TOGETHER_API_KEY at https://api.together.xyz/settings/api-keys"""
 
  # Minimum number of samples required for fine-tuning file
- MIN_SAMPLES = 100
+ MIN_SAMPLES = 1
 
  # the number of bytes in a gigabyte, used to convert bytes to GB for readable comparison
  NUM_BYTES_IN_GB = 2**30
@@ -20,7 +20,7 @@ class ChatCompletions:
      def create(
          self,
          *,
-         messages: List[Dict[str, str]],
+         messages: List[Dict[str, Any]],
          model: str,
          max_tokens: int | None = None,
          stop: List[str] | None = None,
@@ -32,13 +32,14 @@ class ChatCompletions:
          frequency_penalty: float | None = None,
          min_p: float | None = None,
          logit_bias: Dict[str, float] | None = None,
+         seed: int | None = None,
          stream: bool = False,
          logprobs: int | None = None,
          echo: bool | None = None,
          n: int | None = None,
          safety_model: str | None = None,
          response_format: Dict[str, str | Dict[str, Any]] | None = None,
-         tools: Dict[str, str | Dict[str, Any]] | None = None,
+         tools: List[Dict[str, Any]] | None = None,
          tool_choice: str | Dict[str, str | Dict[str, str]] | None = None,
          **kwargs: Any,
      ) -> ChatCompletionResponse | Iterator[ChatCompletionChunk]:
@@ -79,6 +80,7 @@ class ChatCompletions:
              logit_bias (Dict[str, float], optional): A dictionary of tokens and their bias values that modify the
                  likelihood of specific tokens being sampled. Bias values must be in the range [-100, 100].
                  Defaults to None.
+             seed (int, optional): A seed value to use for reproducibility.
              stream (bool, optional): Flag indicating whether to stream the generated completions.
                  Defaults to False.
              logprobs (int, optional): Number of top-k logprobs to return
@@ -124,6 +126,7 @@ class ChatCompletions:
              frequency_penalty=frequency_penalty,
              min_p=min_p,
              logit_bias=logit_bias,
+             seed=seed,
              stream=stream,
              logprobs=logprobs,
              echo=echo,
@@ -171,6 +174,7 @@ class AsyncChatCompletions:
          frequency_penalty: float | None = None,
          min_p: float | None = None,
          logit_bias: Dict[str, float] | None = None,
+         seed: int | None = None,
          stream: bool = False,
          logprobs: int | None = None,
          echo: bool | None = None,
@@ -218,6 +222,7 @@ class AsyncChatCompletions:
              logit_bias (Dict[str, float], optional): A dictionary of tokens and their bias values that modify the
                  likelihood of specific tokens being sampled. Bias values must be in the range [-100, 100].
                  Defaults to None.
+             seed (int, optional): A seed value to use for reproducibility.
              stream (bool, optional): Flag indicating whether to stream the generated completions.
                  Defaults to False.
              logprobs (int, optional): Number of top-k logprobs to return
@@ -263,6 +268,7 @@ class AsyncChatCompletions:
              frequency_penalty=frequency_penalty,
              min_p=min_p,
              logit_bias=logit_bias,
+             seed=seed,
              stream=stream,
              logprobs=logprobs,
              echo=echo,
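
For reference, the `seed` parameter introduced above (and mirrored in the plain completions resource below) is passed straight through the client; a minimal usage sketch, assuming TOGETHER_API_KEY is set in the environment and using a placeholder model name:

    from together import Together

    client = Together()  # reads TOGETHER_API_KEY from the environment

    # Reusing the same seed makes sampling reproducible across otherwise identical calls.
    response = client.chat.completions.create(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",  # placeholder model name
        messages=[{"role": "user", "content": "Name three primary colors."}],
        seed=42,
    )
    print(response.choices[0].message.content)

The response choices also gain an optional `seed` field (see the type changes further below).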
@@ -32,6 +32,7 @@ class Completions:
          frequency_penalty: float | None = None,
          min_p: float | None = None,
          logit_bias: Dict[str, float] | None = None,
+         seed: int | None = None,
          stream: bool = False,
          logprobs: int | None = None,
          echo: bool | None = None,
@@ -75,6 +76,7 @@ class Completions:
              logit_bias (Dict[str, float], optional): A dictionary of tokens and their bias values that modify the
                  likelihood of specific tokens being sampled. Bias values must be in the range [-100, 100].
                  Defaults to None.
+             seed (int, optional): Seed value for reproducibility.
              stream (bool, optional): Flag indicating whether to stream the generated completions.
                  Defaults to False.
              logprobs (int, optional): Number of top-k logprobs to return
@@ -107,6 +109,7 @@ class Completions:
              repetition_penalty=repetition_penalty,
              presence_penalty=presence_penalty,
              frequency_penalty=frequency_penalty,
+             seed=seed,
              min_p=min_p,
              logit_bias=logit_bias,
              stream=stream,
@@ -153,6 +156,7 @@ class AsyncCompletions:
          frequency_penalty: float | None = None,
          min_p: float | None = None,
          logit_bias: Dict[str, float] | None = None,
+         seed: int | None = None,
          stream: bool = False,
          logprobs: int | None = None,
          echo: bool | None = None,
@@ -196,6 +200,7 @@ class AsyncCompletions:
              logit_bias (Dict[str, float], optional): A dictionary of tokens and their bias values that modify the
                  likelihood of specific tokens being sampled. Bias values must be in the range [-100, 100].
                  Defaults to None.
+             seed (int, optional): Seed value for reproducibility.
              stream (bool, optional): Flag indicating whether to stream the generated completions.
                  Defaults to False.
              logprobs (int, optional): Number of top-k logprobs to return
@@ -230,6 +235,7 @@ class AsyncCompletions:
              frequency_penalty=frequency_penalty,
              min_p=min_p,
              logit_bias=logit_bias,
+             seed=seed,
              stream=stream,
              logprobs=logprobs,
              echo=echo,
@@ -2,6 +2,8 @@ from __future__ import annotations
 
  from pathlib import Path
 
+ from rich import print as rprint
+
  from together.abstract import api_requestor
  from together.filemanager import DownloadManager
  from together.together_response import TogetherResponse
@@ -43,6 +45,7 @@ class FineTuning:
          lora_trainable_modules: str | None = "all-linear",
          suffix: str | None = None,
          wandb_api_key: str | None = None,
+         verbose: bool = False,
      ) -> FinetuneResponse:
          """
          Method to initiate a fine-tuning job
@@ -67,6 +70,8 @@ class FineTuning:
                  Defaults to None.
              wandb_api_key (str, optional): API key for Weights & Biases integration.
                  Defaults to None.
+             verbose (bool, optional): whether to print the job parameters before submitting a request.
+                 Defaults to False.
 
          Returns:
              FinetuneResponse: Object containing information about fine-tuning job.
@@ -85,7 +90,7 @@ class FineTuning:
              lora_trainable_modules=lora_trainable_modules,
          )
 
-         parameter_payload = FinetuneRequest(
+         finetune_request = FinetuneRequest(
              model=model,
              training_file=training_file,
              validation_file=validation_file,
@@ -97,7 +102,13 @@ class FineTuning:
              training_type=training_type,
              suffix=suffix,
              wandb_key=wandb_api_key,
-         ).model_dump(exclude_none=True)
+         )
+         if verbose:
+             rprint(
+                 "Submitting a fine-tuning job with the following parameters:",
+                 finetune_request,
+             )
+         parameter_payload = finetune_request.model_dump(exclude_none=True)
 
          response, _, _ = requestor.request(
              options=TogetherRequest(
@@ -110,11 +121,6 @@ class FineTuning:
 
          assert isinstance(response, TogetherResponse)
 
-         # TODO: Remove it after the 21st of August
-         log_warn(
-             "The default value of batch size has been changed from 32 to 16 since together version >= 1.2.6"
-         )
-
          # TODO: Remove after next LoRA default change
          log_warn(
              "Some of the jobs run _directly_ from the together-python library might be trained using LoRA adapters. "
@@ -266,7 +272,7 @@ class FineTuning:
                  raise ValueError(
                      "Only DEFAULT checkpoint type is allowed for FullTrainingType"
                  )
-             url += f"&checkpoint=modelOutputPath"
+             url += "&checkpoint=modelOutputPath"
          elif isinstance(ft_job.training_type, LoRATrainingType):
              if checkpoint_type == DownloadCheckpointType.DEFAULT:
                  checkpoint_type = DownloadCheckpointType.MERGED
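
For reference, the new `verbose` flag prints the assembled FinetuneRequest (via rich) before the HTTP request is sent, which is what the CLI now relies on after the confirmation prompt; a minimal sketch, with placeholder file and model identifiers:

    from together import Together

    client = Together()

    # verbose=True prints the job parameters before the request is submitted.
    job = client.fine_tuning.create(
        training_file="file-xxxxxxxxxxxx",       # placeholder uploaded training-file ID
        model="meta-llama/Meta-Llama-3.1-8B",    # placeholder base model
        n_epochs=1,
        verbose=True,
    )
    print(job.id)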
@@ -40,9 +40,24 @@ class ToolCalls(BaseModel):
      function: FunctionCall | None = None
 
 
+ class ChatCompletionMessageContentType(str, Enum):
+     TEXT = "text"
+     IMAGE_URL = "image_url"
+
+
+ class ChatCompletionMessageContentImageURL(BaseModel):
+     url: str
+
+
+ class ChatCompletionMessageContent(BaseModel):
+     type: ChatCompletionMessageContentType
+     text: str | None = None
+     image_url: ChatCompletionMessageContentImageURL | None = None
+
+
  class ChatCompletionMessage(BaseModel):
      role: MessageRole
-     content: str | None = None
+     content: str | List[ChatCompletionMessageContent] | None = None
      tool_calls: List[ToolCalls] | None = None
 
 
@@ -96,6 +111,7 @@ class ChatCompletionRequest(BaseModel):
      frequency_penalty: float | None = None
      min_p: float | None = None
      logit_bias: Dict[str, float] | None = None
+     seed: int | None = None
      # stream SSE token chunks
      stream: bool = False
      # return logprobs
@@ -126,6 +142,7 @@ class ChatCompletionRequest(BaseModel):
  class ChatCompletionChoicesData(BaseModel):
      index: int | None = None
      logprobs: LogprobsPart | None = None
+     seed: int | None = None
      finish_reason: FinishReason | None = None
      message: ChatCompletionMessage | None = None
 
@@ -150,6 +167,7 @@ class ChatCompletionResponse(BaseModel):
  class ChatCompletionChoicesChunk(BaseModel):
      index: int | None = None
      logprobs: float | None = None
+     seed: int | None = None
      finish_reason: FinishReason | None = None
      delta: DeltaContent | None = None
 
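For reference, the new content-part models above let a chat message's `content` be a list of typed parts (text and image_url) instead of a plain string, matching the relaxed `messages: List[Dict[str, Any]]` signature earlier in this diff; a minimal sketch, assuming a vision-capable model (the model name and image URL are placeholders):

    from together import Together

    client = Together()

    # content is a list of typed parts rather than a single string.
    response = client.chat.completions.create(
        model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",  # placeholder vision model
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What is shown in this picture?"},
                    {"type": "image_url", "image_url": {"url": "https://example.com/photo.png"}},
                ],
            }
        ],
    )
    print(response.choices[0].message.content)
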
@@ -35,6 +35,7 @@ class CompletionRequest(BaseModel):
      frequency_penalty: float | None = None
      min_p: float | None = None
      logit_bias: Dict[str, float] | None = None
+     seed: int | None = None
      # stream SSE token chunks
      stream: bool = False
      # return logprobs
@@ -61,13 +62,15 @@ class CompletionRequest(BaseModel):
  class CompletionChoicesData(BaseModel):
      index: int
      logprobs: LogprobsPart | None = None
-     finish_reason: FinishReason | None = None
+     seed: int | None = None
+     finish_reason: FinishReason
      text: str
 
 
  class CompletionChoicesChunk(BaseModel):
      index: int | None = None
      logprobs: float | None = None
+     seed: int | None = None
      finish_reason: FinishReason | None = None
      delta: DeltaContent | None = None
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: together
- Version: 1.2.11
+ Version: 1.2.13
  Summary: Python client for Together's Cloud Platform!
  Home-page: https://github.com/togethercomputer/together-python
  License: Apache-2.0
@@ -25,6 +25,7 @@ Requires-Dist: pillow (>=10.3.0,<11.0.0)
  Requires-Dist: pyarrow (>=10.0.1)
  Requires-Dist: pydantic (>=2.6.3,<3.0.0)
  Requires-Dist: requests (>=2.31.0,<3.0.0)
+ Requires-Dist: rich (>=13.8.1,<14.0.0)
  Requires-Dist: tabulate (>=0.9.0,<0.10.0)
  Requires-Dist: tqdm (>=4.66.2,<5.0.0)
  Requires-Dist: typer (>=0.9,<0.13)
@@ -6,12 +6,12 @@ together/cli/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
  together/cli/api/chat.py,sha256=2PHRb-9T-lUEKhUJFtc7SxJv3shCVx40gq_8pzfsewM,9234
  together/cli/api/completions.py,sha256=l-Zw5t7hojL3w8xd_mitS2NRB72i5Z0xwkzH0rT5XMc,4263
  together/cli/api/files.py,sha256=QLYEXRkY8J2Gg1SbTCtzGfoTMvosoeACNK83L_oLubs,3397
- together/cli/api/finetune.py,sha256=c6-T-5K7sODgQa7ehdqZqyluRaDUzPXCgMeFWm3iIwA,8855
+ together/cli/api/finetune.py,sha256=21I_m4hIjSj5vRCNejQUzry3R-qzwd89W_LyIh1Q7Ts,10086
  together/cli/api/images.py,sha256=01dFYa2sK1HqUwVCD9FlwcjqkYWLoNxFZkzok13EriE,2363
  together/cli/api/models.py,sha256=xWEzu8ZpxM_Pz9KEjRPRVuv_v22RayYZ4QcgiezT5tE,1126
  together/cli/cli.py,sha256=RC0tgapkSOFjsRPg8p-8dx9D2LDzm8YmVCHUjk_aVyQ,1977
  together/client.py,sha256=mOlIFjjE9eSTb0o_weaKJwm8qvWNKHDiMmp8kQ7y68I,4946
- together/constants.py,sha256=WHe6JA9TliwgErkCnovWPS9w9xXfA3X5PtKJv_y2JxQ,908
+ together/constants.py,sha256=6DAvMTrGYI73gUFRbfBdLfDxksucpKjKsiH07PGtSSM,906
  together/error.py,sha256=emjhTSsLwiZvW0v1EmYemjacCMtcFIKAXWWK_2IdP18,5419
  together/filemanager.py,sha256=QHhBn73oVFdgUpSYXYLmJzHJ9c5wYEMJC0ur6ZgDeYo,11269
  together/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -24,20 +24,20 @@ together/legacy/images.py,sha256=bJJRs-6C7-NexPyaeyHiYlHOU51yls5-QAiqtO4xrZU,626
  together/legacy/models.py,sha256=85ZN9Ids_FjdYNDRv5k7sgrtVWPKPHqkDplORtVUGHg,1087
  together/resources/__init__.py,sha256=7BLdBCNUbgi5mz30EFfdkdIYiGfFCkiUbdNzMY1-igY,792
  together/resources/chat/__init__.py,sha256=RsTptdP8MeGjcdIjze896-J27cRvCbUoMft0X2BVlQ8,617
- together/resources/chat/completions.py,sha256=AOmm2CRwhz9VZpez35LoOUw432IPbtwCeUQXtCd3RK8,14208
- together/resources/completions.py,sha256=C_Djn41zjdWdPn4rnOrAVEHvOaqyBSyxZS4eYqsZdMI,11482
+ together/resources/chat/completions.py,sha256=jYiNZsWa8RyEacL0VgxWj1egJ857oU4nxIY8uqGHcaU,14459
+ together/resources/completions.py,sha256=5Wa-ZjPCxRcam6CDe7KgGYlTA7yJZMmd5TrRgGCL_ug,11726
  together/resources/embeddings.py,sha256=PTvLb82yjG_-iQOyuhsilp77Fr7gZ0o6WD2KeRnKoxs,2675
  together/resources/files.py,sha256=bnPbaF25e4InBRPvHwXHXT-oSX1Z1sZRsnQW5wq82U4,4990
- together/resources/finetune.py,sha256=t8wOulAyNFKlHjHv_u4-PwdL-rR-klgEvDEfH-efdkQ,15970
+ together/resources/finetune.py,sha256=rSj5keJdouxDSMyEQ64WlSG-tkc0YY2jcB-oD9SjDPQ,16194
  together/resources/images.py,sha256=LQUjKPaFxWTqOAPnyF1Pp7Rz4NLOYhmoKwshpYiprEM,4923
  together/resources/models.py,sha256=2dtHhXAqTDOOpwSbYLzWcKTC0-m2Szlb7LDYvp7Jr4w,1786
  together/resources/rerank.py,sha256=3Ju_aRSyZ1s_3zCSNZnSnEJErUVmt2xa3M8z1nvejMA,3931
  together/together_response.py,sha256=MhczUCPem93cjX-A1TOAUrRj3sO-o3SLcEcTsZgVzQI,1319
  together/types/__init__.py,sha256=ghMiyyR2UzY-Io9Ck3ocwmS6_XSO9VaYWwbLqPDSZfo,1681
  together/types/abstract.py,sha256=1lFQI_3WjsR_t1128AeKW0aTk6EiM6Gh1J3ZuyLLPao,642
- together/types/chat_completions.py,sha256=wy_2E2moA2Aw1RVRXG7bLcHqkSULv-MlJZygajbQN_8,4406
+ together/types/chat_completions.py,sha256=d24F3VfT7uVnmaEk7Fn-O7qkGUg_AQQzR7vPwlXVDXw,4882
  together/types/common.py,sha256=4ZeIgqGioqhIC-nNxY90czNPp-kAqboMulw6-1z6ShM,1511
- together/types/completions.py,sha256=yydloTQePGaY97Lx-kbkvgCqBFhHFl7jU5s7uf9Ncg0,2901
+ together/types/completions.py,sha256=o3FR5ixsTUj-a3pmOUzbSQg-hESVhpqrC9UD__VCqr4,2971
  together/types/embeddings.py,sha256=J7grkYYn7xhqeKaBO2T-8XQRtHhkzYzymovtGdIUK5A,751
  together/types/error.py,sha256=OVlCs3cx_2WhZK4JzHT8SQyRIIqKOP1AZQ4y1PydjAE,370
  together/types/files.py,sha256=-rEUfsV6f2vZB9NrFxT4_933ubsDIUNkPB-3OlOFk4A,1954
@@ -51,8 +51,8 @@ together/utils/api_helpers.py,sha256=RSF7SRhbjHzroMOSWAXscflByM1r1ta_1SpxkAT22iE
  together/utils/files.py,sha256=gMLthqfP5hKxVAerHMdy7gLXzdfY6lyOXdpW24Y4X3I,7165
  together/utils/tools.py,sha256=3-lXWP3cBCzOVSZg9tr5zOT1jaVeKAKVWxO2fcXZTh8,1788
  together/version.py,sha256=p03ivHyE0SyWU4jAnRTBi_sOwywVWoZPU4g2gzRgG-Y,126
- together-1.2.11.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- together-1.2.11.dist-info/METADATA,sha256=dGhkUfJY1_UbjfOmWiFoDIs-QzLFD1ZgBwRy9E7P59Y,11813
- together-1.2.11.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- together-1.2.11.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
- together-1.2.11.dist-info/RECORD,,
+ together-1.2.13.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ together-1.2.13.dist-info/METADATA,sha256=RP0IebBK1ge06MLZ1Ixh_AZyIK6jT4yczMZvfhzxeJE,11852
+ together-1.2.13.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ together-1.2.13.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
+ together-1.2.13.dist-info/RECORD,,