together 2.0.0a8-py3-none-any.whl → 2.0.0a9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
together/_streaming.py CHANGED
@@ -55,30 +55,31 @@ class Stream(Generic[_T]):
         process_data = self._client._process_response_data
         iterator = self._iter_events()
 
-        for sse in iterator:
-            if sse.data.startswith("[DONE]"):
-                break
-
-            if sse.event is None:
-                data = sse.json()
-                if is_mapping(data) and data.get("error"):
-                    message = None
-                    error = data.get("error")
-                    if is_mapping(error):
-                        message = error.get("message")
-                    if not message or not isinstance(message, str):
-                        message = "An error occurred during streaming"
-
-                    raise APIError(
-                        message=message,
-                        request=self.response.request,
-                        body=data["error"],
-                    )
-
-            yield process_data(data=data, cast_to=cast_to, response=response)
-
-        # As we might not fully consume the response stream, we need to close it explicitly
-        response.close()
+        try:
+            for sse in iterator:
+                if sse.data.startswith("[DONE]"):
+                    break
+
+                if sse.event is None:
+                    data = sse.json()
+                    if is_mapping(data) and data.get("error"):
+                        message = None
+                        error = data.get("error")
+                        if is_mapping(error):
+                            message = error.get("message")
+                        if not message or not isinstance(message, str):
+                            message = "An error occurred during streaming"
+
+                        raise APIError(
+                            message=message,
+                            request=self.response.request,
+                            body=data["error"],
+                        )
+
+                yield process_data(data=data, cast_to=cast_to, response=response)
+        finally:
+            # Ensure the response is closed even if the consumer doesn't read all data
+            response.close()
 
     def __enter__(self) -> Self:
         return self
@@ -137,30 +138,31 @@ class AsyncStream(Generic[_T]):
         process_data = self._client._process_response_data
         iterator = self._iter_events()
 
-        async for sse in iterator:
-            if sse.data.startswith("[DONE]"):
-                break
-
-            if sse.event is None:
-                data = sse.json()
-                if is_mapping(data) and data.get("error"):
-                    message = None
-                    error = data.get("error")
-                    if is_mapping(error):
-                        message = error.get("message")
-                    if not message or not isinstance(message, str):
-                        message = "An error occurred during streaming"
-
-                    raise APIError(
-                        message=message,
-                        request=self.response.request,
-                        body=data["error"],
-                    )
-
-            yield process_data(data=data, cast_to=cast_to, response=response)
-
-        # As we might not fully consume the response stream, we need to close it explicitly
-        await response.aclose()
+        try:
+            async for sse in iterator:
+                if sse.data.startswith("[DONE]"):
+                    break
+
+                if sse.event is None:
+                    data = sse.json()
+                    if is_mapping(data) and data.get("error"):
+                        message = None
+                        error = data.get("error")
+                        if is_mapping(error):
+                            message = error.get("message")
+                        if not message or not isinstance(message, str):
+                            message = "An error occurred during streaming"
+
+                        raise APIError(
+                            message=message,
+                            request=self.response.request,
+                            body=data["error"],
+                        )
+
+                yield process_data(data=data, cast_to=cast_to, response=response)
+        finally:
+            # Ensure the response is closed even if the consumer doesn't read all data
+            await response.aclose()
 
     async def __aenter__(self) -> Self:
         return self
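The `_streaming.py` change above moves the SSE loop into a `try`/`finally` so the HTTP response is released even when the consumer stops iterating early or an error interrupts the loop. A minimal consumer-side sketch of why that matters, assuming the synchronous client class is `Together` (mirroring the `AsyncTogether` shown in the README diff below); the model id and prompt are placeholders, not taken from this diff:

```python
# Illustrative sketch only: the model id and prompt are placeholders.
from together import Together

client = Together()  # reads TOGETHER_API_KEY from the environment by default

with client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct-Turbo",  # placeholder model id
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
) as stream:
    for chunk in stream:
        break  # stop early; the finally block still runs response.close() when the stream is closed
```

`AsyncStream` gets the same guarantee via `await response.aclose()` in its `finally` block.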
together/_utils/_utils.py CHANGED
@@ -373,7 +373,7 @@ def get_required_header(headers: HeadersLike, header: str) -> str:
     lower_header = header.lower()
     if is_mapping_t(headers):
         # mypy doesn't understand the type narrowing here
-        for k, v in headers.items():  # type: ignore[misc, has-type]
+        for k, v in headers.items():  # type: ignore[misc, has-type, attr-defined]
             if k.lower() == lower_header and isinstance(v, str):  # type: ignore[has-type]
                 return v  # type: ignore[has-type]
 
together/_version.py CHANGED
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "together"
-__version__ = "2.0.0-alpha.8"  # x-release-please-version
+__version__ = "2.0.0-alpha.9"  # x-release-please-version
together/resources/audio/transcriptions.py CHANGED
@@ -50,6 +50,8 @@ class TranscriptionsResource(SyncAPIResource):
         file: FileTypes,
         diarize: bool | Omit = omit,
         language: str | Omit = omit,
+        max_speakers: int | Omit = omit,
+        min_speakers: int | Omit = omit,
         model: Literal["openai/whisper-large-v3"] | Omit = omit,
         prompt: str | Omit = omit,
         response_format: Literal["json", "verbose_json"] | Omit = omit,
@@ -82,6 +84,12 @@ class TranscriptionsResource(SyncAPIResource):
           language: Optional ISO 639-1 language code. If `auto` is provided, language is
               auto-detected.
 
+          max_speakers: Maximum number of speakers expected in the audio. Used to improve diarization
+              accuracy when the approximate number of speakers is known.
+
+          min_speakers: Minimum number of speakers expected in the audio. Used to improve diarization
+              accuracy when the approximate number of speakers is known.
+
           model: Model to use for transcription
 
           prompt: Optional text to bias decoding.
@@ -107,6 +115,8 @@ class TranscriptionsResource(SyncAPIResource):
                 "file": file,
                 "diarize": diarize,
                 "language": language,
+                "max_speakers": max_speakers,
+                "min_speakers": min_speakers,
                 "model": model,
                 "prompt": prompt,
                 "response_format": response_format,
@@ -161,6 +171,8 @@ class AsyncTranscriptionsResource(AsyncAPIResource):
         file: FileTypes,
         diarize: bool | Omit = omit,
         language: str | Omit = omit,
+        max_speakers: int | Omit = omit,
+        min_speakers: int | Omit = omit,
         model: Literal["openai/whisper-large-v3"] | Omit = omit,
         prompt: str | Omit = omit,
         response_format: Literal["json", "verbose_json"] | Omit = omit,
@@ -193,6 +205,12 @@ class AsyncTranscriptionsResource(AsyncAPIResource):
           language: Optional ISO 639-1 language code. If `auto` is provided, language is
               auto-detected.
 
+          max_speakers: Maximum number of speakers expected in the audio. Used to improve diarization
+              accuracy when the approximate number of speakers is known.
+
+          min_speakers: Minimum number of speakers expected in the audio. Used to improve diarization
+              accuracy when the approximate number of speakers is known.
+
           model: Model to use for transcription
 
           prompt: Optional text to bias decoding.
@@ -218,6 +236,8 @@ class AsyncTranscriptionsResource(AsyncAPIResource):
                 "file": file,
                 "diarize": diarize,
                 "language": language,
+                "max_speakers": max_speakers,
+                "min_speakers": min_speakers,
                 "model": model,
                 "prompt": prompt,
                 "response_format": response_format,
together/resources/images.py CHANGED
@@ -8,7 +8,7 @@ from typing_extensions import Literal
 import httpx
 
 from ..types import image_generate_params
-from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
 from .._utils import maybe_transform, async_maybe_transform
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
@@ -64,6 +64,7 @@ class ImagesResource(SyncAPIResource):
         n: int | Omit = omit,
         negative_prompt: str | Omit = omit,
         output_format: Literal["jpeg", "png"] | Omit = omit,
+        reference_images: SequenceNotStr[str] | Omit = omit,
         response_format: Literal["base64", "url"] | Omit = omit,
         seed: int | Omit = omit,
         steps: int | Omit = omit,
@@ -105,6 +106,10 @@ class ImagesResource(SyncAPIResource):
           output_format: The format of the image response. Can be either be `jpeg` or `png`. Defaults to
              `jpeg`.
 
+          reference_images: An array of image URLs that guide the overall appearance and style of the
+              generated image. These reference images influence the visual characteristics
+              consistently across the generation.
+
           response_format: Format of the image response. Can be either a base64 string or a URL.
 
           seed: Seed used for generation. Can be used to reproduce image generations.
@@ -135,6 +140,7 @@ class ImagesResource(SyncAPIResource):
                     "n": n,
                     "negative_prompt": negative_prompt,
                     "output_format": output_format,
+                    "reference_images": reference_images,
                     "response_format": response_format,
                     "seed": seed,
                     "steps": steps,
@@ -189,6 +195,7 @@ class AsyncImagesResource(AsyncAPIResource):
         n: int | Omit = omit,
         negative_prompt: str | Omit = omit,
         output_format: Literal["jpeg", "png"] | Omit = omit,
+        reference_images: SequenceNotStr[str] | Omit = omit,
         response_format: Literal["base64", "url"] | Omit = omit,
         seed: int | Omit = omit,
         steps: int | Omit = omit,
@@ -230,6 +237,10 @@ class AsyncImagesResource(AsyncAPIResource):
           output_format: The format of the image response. Can be either be `jpeg` or `png`. Defaults to
              `jpeg`.
 
+          reference_images: An array of image URLs that guide the overall appearance and style of the
+              generated image. These reference images influence the visual characteristics
+              consistently across the generation.
+
           response_format: Format of the image response. Can be either a base64 string or a URL.
 
           seed: Seed used for generation. Can be used to reproduce image generations.
@@ -260,6 +271,7 @@ class AsyncImagesResource(AsyncAPIResource):
                     "n": n,
                     "negative_prompt": negative_prompt,
                     "output_format": output_format,
+                    "reference_images": reference_images,
                     "response_format": response_format,
                     "seed": seed,
                     "steps": steps,
together/types/audio/transcription_create_params.py CHANGED
@@ -34,6 +34,20 @@ class TranscriptionCreateParams(TypedDict, total=False):
     If `auto` is provided, language is auto-detected.
     """
 
+    max_speakers: int
+    """Maximum number of speakers expected in the audio.
+
+    Used to improve diarization accuracy when the approximate number of speakers is
+    known.
+    """
+
+    min_speakers: int
+    """Minimum number of speakers expected in the audio.
+
+    Used to improve diarization accuracy when the approximate number of speakers is
+    known.
+    """
+
     model: Literal["openai/whisper-large-v3"]
     """Model to use for transcription"""
 
together/types/image_generate_params.py CHANGED
@@ -5,6 +5,8 @@ from __future__ import annotations
 from typing import Union, Iterable
 from typing_extensions import Literal, Required, TypedDict
 
+from .._types import SequenceNotStr
+
 __all__ = ["ImageGenerateParams", "ImageLora"]
 
 
@@ -61,6 +63,13 @@ class ImageGenerateParams(TypedDict, total=False):
     Can be either be `jpeg` or `png`. Defaults to `jpeg`.
     """
 
+    reference_images: SequenceNotStr[str]
+    """
+    An array of image URLs that guide the overall appearance and style of the
+    generated image. These reference images influence the visual characteristics
+    consistently across the generation.
+    """
+
     response_format: Literal["base64", "url"]
     """Format of the image response. Can be either a base64 string or a URL."""
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: together
-Version: 2.0.0a8
+Version: 2.0.0a9
 Summary: The official Python library for the together API
 Project-URL: Homepage, https://github.com/togethercomputer/together-py
 Project-URL: Repository, https://github.com/togethercomputer/together-py
@@ -144,6 +144,7 @@ pip install 'together[aiohttp] @ git+ssh://git@github.com/togethercomputer/toget
 Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`:
 
 ```python
+import os
 import asyncio
 from together import DefaultAioHttpClient
 from together import AsyncTogether
@@ -151,7 +152,7 @@ from together import AsyncTogether
 
 async def main() -> None:
     async with AsyncTogether(
-        api_key="My API Key",
+        api_key=os.environ.get("TOGETHER_API_KEY"),  # This is the default and can be omitted
         http_client=DefaultAioHttpClient(),
     ) as client:
         chat_completion = await client.chat.completions.create(
@@ -9,9 +9,9 @@ together/_models.py,sha256=3D65psj_C02Mw0K2zpBWrn1khmrvtEXgTTQ6P4r3tUY,31837
 together/_qs.py,sha256=craIKyvPktJ94cvf9zn8j8ekG9dWJzhWv0ob34lIOv4,4828
 together/_resource.py,sha256=-ZTq9O5qf2YsgjJk_gwJs-CM_OG4p6gdMLcNWjuxFwQ,1112
 together/_response.py,sha256=lvqEsCbpD8SRJTjlhhUFGbnLUR_4-Qva-OApxfVdiY4,28800
-together/_streaming.py,sha256=SgGfDGFD2J4pNdMrj--YjD8tCjfEj-htf_Jk6q1aby4,11625
+together/_streaming.py,sha256=sk6fVYbpdO3Y-0S5iwZTHQJ3N24UkK0KaupgUTftWZk,11825
 together/_types.py,sha256=nL3wDyii53Z400Anq1qLS1pEW0PwQId-OjnbRJDwoj4,7238
-together/_version.py,sha256=877Tb5z6TSnhW_tDkXCD4U1eUrrjghU4llqc4rfrnRU,168
+together/_version.py,sha256=p3SWLX96_Ba9f7WpAgA4thdvwcM0aML58lmPvGUECTE,168
 together/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 together/_utils/__init__.py,sha256=7fch0GT9zpNnErbciSpUNa-SjTxxjY6kxHxKMOM4AGs,2305
 together/_utils/_compat.py,sha256=rN17SSvjMoQE1GmKFTLniRuG1sKj2WAD5VjdLPeRlF0,1231
@@ -24,7 +24,7 @@ together/_utils/_streams.py,sha256=SMC90diFFecpEg_zgDRVbdR3hSEIgVVij4taD-noMLM,2
 together/_utils/_sync.py,sha256=HBnZkkBnzxtwOZe0212C4EyoRvxhTVtTrLFDz2_xVCg,1589
 together/_utils/_transform.py,sha256=NjCzmnfqYrsAikUHQig6N9QfuTVbKipuP3ur9mcNF-E,15951
 together/_utils/_typing.py,sha256=N_5PPuFNsaygbtA_npZd98SVN1LQQvFTKL6bkWPBZGU,4786
-together/_utils/_utils.py,sha256=zaHjfH3uG2SSJUqIPCWRaTCkTBNm_1hw-P-ZV0XZHec,12320
+together/_utils/_utils.py,sha256=g9ftElB09kVT6EVfCIlD_nUfANhDX5_vZO61FDWoIQI,12334
 together/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
 together/lib/__init__.py,sha256=Qtdi6geFNzxE-F51eNDk1ESXYyYDt8b82MR1POANQBQ,394
 together/lib/constants.py,sha256=EgcTlmk4QqVqjZjGej5k5JEwoRqidlITQ8LQqzW0dXI,1795
@@ -58,7 +58,7 @@ together/resources/evals.py,sha256=FPjvkbsBY5rrzLyQ-X1G9fWt2QmivI9ol5GExGtqYVA,1
 together/resources/files.py,sha256=0paHeVqNt3NQCXoztCgFS8PEIg_-mMVto-ulHTr7GzE,16854
 together/resources/fine_tuning.py,sha256=gjGJn1bAZlHHRGhjhfrZ7bNU1AgTtrQqSK4BshL47Hg,45315
 together/resources/hardware.py,sha256=xgfCmMrrwF5o1igax0JGec8RY7kkS0s4kKm62RdC3ME,6850
-together/resources/images.py,sha256=hRXxDtMl3oBeMq0dfkdyvu4Qi1YqB_dhFsan0wynUk8,11870
+together/resources/images.py,sha256=mVPQYpDHKBjLVO_Sv0uT62zYXdtWKN2PW3fCvfQLQCs,12612
 together/resources/jobs.py,sha256=TnzSnvJw4x5pqo1xzrkYH8f0viZrzyOqT-_w7xc0NzY,7797
 together/resources/models.py,sha256=kb4OeIFbyfzCE_4rO87i4AMlnuDoTa3pXqKKG95VoLo,10614
 together/resources/rerank.py,sha256=Xoaco2OvKdII7AhPaJDqUqoXmJvXbTWmY4_g_aqq8dQ,8334
@@ -66,7 +66,7 @@ together/resources/videos.py,sha256=AdcC08JrUtbcEJV-G0viH4CF1qU9oNjdjQ7U38QCEkU,
 together/resources/audio/__init__.py,sha256=MKUWFwFsAdCf9LrO8AiUCeIzdknPNDPr4lpAt-pkYSw,2521
 together/resources/audio/audio.py,sha256=stpvzuxIwMnAQLQnqW1KRxx3G_DI-oDSnx3uDN_X1R8,7180
 together/resources/audio/speech.py,sha256=ZavAHDhi8rKzIQ0tRTv1UOIlUJQ5_ArvH3JG1JdN41M,27560
-together/resources/audio/transcriptions.py,sha256=j_ySc6787yilePUVK-fb_NbCOydLAvsJ3JsM5pWS3L0,11913
+together/resources/audio/transcriptions.py,sha256=k5zLDKXNqISjeieSyi1FKbyKsWyhnkeIkApG3l7QzeY,12965
 together/resources/audio/translations.py,sha256=zeV1wJPGzBmQGGgSPNA_vigy_4yuV3aBq6sSLa19-jg,10251
 together/resources/audio/voices.py,sha256=Lj9DtOcv_Dhaq3E5p7Oty1T_JkhrsGDZcDF91HHA3Yw,4905
 together/resources/chat/__init__.py,sha256=BVAfz9TM3DT5W9f_mt0P9YRxL_MsUxKCWAH6u1iogmA,1041
@@ -122,7 +122,7 @@ together/types/hardware_list_response.py,sha256=cUhOyWYc_Z8-FRBHUgNgA3fI0XTfPgUq
 together/types/image_data_b64.py,sha256=pLY7JDBb1HF1T29ACbae_xn6JQfttpqQVeG_jJeenZU,284
 together/types/image_data_url.py,sha256=6A_EYNfcR6Z6sZkyC4MThxeZnK2cvTuQn6-A1dXM85w,274
 together/types/image_file.py,sha256=sADh0UcrGlemkreIvHBEBizstAvt64CVOu7KtOALcHk,569
-together/types/image_generate_params.py,sha256=siOZ7I-pVD61nx3nIwCw7HfMgK-u1SJkMMdUs-7Akkw,2488
+together/types/image_generate_params.py,sha256=bdOsD1NXYjCq8QT27wCd8P1hGWIfCd70E8u6n8TLzGQ,2783
 together/types/job_list_response.py,sha256=y7tFXGH2dYD9PfVH2_2Rf6RDkWsW4olljXt5FnGO6UA,950
 together/types/job_retrieve_response.py,sha256=I4HHmSUCAVFuy2RkrZuanssaPLRkDmG_i0Qc192yRmM,880
 together/types/log_probs.py,sha256=A1DD9Cdb5G7bufrBiaMZC4HJ7v1NH5_zFEYvLgFY1NI,473
@@ -141,7 +141,7 @@ together/types/video_create_params.py,sha256=9Mx7TWTaPHOOpaMezz9FD5VC7hN6jGbnynG
 together/types/video_job.py,sha256=E3YyxzPDXHv8aFjIqZ8NgokZbkVqOaNM5_ERQAjC6PE,1470
 together/types/audio/__init__.py,sha256=FRPjWqhXrrSZgg615cnF6cWNqEowSovw__2V7BR3kgo,654
 together/types/audio/speech_create_params.py,sha256=SwoTcRMG5NnO_LpT3eAXFfOqqxyFh6C-cU8mtY2v4lk,2867
-together/types/audio/transcription_create_params.py,sha256=pKjDwN2DUcJG3yVjfw-WmZ68PURwxojJqem3p_5-rJU,1788
+together/types/audio/transcription_create_params.py,sha256=utzevQa2y_fiznVYVIUK7J76HzH_YWgLsvtbtFIH_Gs,2156
 together/types/audio/transcription_create_response.py,sha256=z8_pzJlzYjP4QxJhwbKuDgAeVpWbgee6jt3QLFVVSjM,3059
 together/types/audio/translation_create_params.py,sha256=RjKaaR2RNSE4DxuBHCBKDURxxqalZJmApIhtmDV7MBM,1140
 together/types/audio/translation_create_response.py,sha256=T6SUCExVMin1qSGamHuiWGWS84MZ92tZPBHD7NYm4IU,1843
@@ -157,8 +157,8 @@ together/types/chat/chat_completion_warning.py,sha256=_Dp7YKlxyY2HeZopTvT-Go7qqK
 together/types/chat/completion_create_params.py,sha256=xPv9X0dtBuPt9wnDm7wlncgGjGZJGkK8P6sFIGhc6WY,10954
 together/types/code_interpreter/__init__.py,sha256=dAXfb3ryLMtcBalCfxxNu2wJVswVP8G1xXryZnahPQY,201
 together/types/code_interpreter/session_list_response.py,sha256=TRxLGFTmIY-KLpStKjJtsrm4EI6BBvakpx43B6pkhnw,662
-together-2.0.0a8.dist-info/METADATA,sha256=wKEnBkQ3JVtV1vsOSkEuDgY09al1sL7Xj9ikv1xaio8,20202
-together-2.0.0a8.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
-together-2.0.0a8.dist-info/entry_points.txt,sha256=4f4RAX89wQkx3AnfHXiGrKyg2fCPnwMd2UdPX48OczA,55
-together-2.0.0a8.dist-info/licenses/LICENSE,sha256=5I5MO2DiiBFcD_p4ZF2T4GDb-WeBMD591ALtADdtXDc,11338
-together-2.0.0a8.dist-info/RECORD,,
+together-2.0.0a9.dist-info/METADATA,sha256=9tkQfrZfh5J-etaBCQFnsx7JIy6lCw8xAEsBfhr7F-M,20276
+together-2.0.0a9.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+together-2.0.0a9.dist-info/entry_points.txt,sha256=4f4RAX89wQkx3AnfHXiGrKyg2fCPnwMd2UdPX48OczA,55
+together-2.0.0a9.dist-info/licenses/LICENSE,sha256=5I5MO2DiiBFcD_p4ZF2T4GDb-WeBMD591ALtADdtXDc,11338
+together-2.0.0a9.dist-info/RECORD,,