together 1.4.0__py3-none-any.whl → 1.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
together/utils/files.py CHANGED
@@ -4,7 +4,7 @@ import json
4
4
  import os
5
5
  from pathlib import Path
6
6
  from traceback import format_exc
7
- from typing import Any, Dict
7
+ from typing import Any, Dict, List
8
8
 
9
9
  from pyarrow import ArrowInvalid, parquet
10
10
 
@@ -96,6 +96,140 @@ def check_file(
96
96
  return report_dict
97
97
 
98
98
 
99
+ def validate_messages(messages: List[Dict[str, str | bool]], idx: int) -> None:
100
+ """Validate the messages column."""
101
+ if not isinstance(messages, list):
102
+ raise InvalidFileFormatError(
103
+ message=f"Invalid format on line {idx + 1} of the input file. "
104
+ f"Expected a list of messages. Found {type(messages)}",
105
+ line_number=idx + 1,
106
+ error_source="key_value",
107
+ )
108
+ if not messages:
109
+ raise InvalidFileFormatError(
110
+ message=f"Invalid format on line {idx + 1} of the input file. "
111
+ f"Expected a non-empty list of messages. Found empty list",
112
+ line_number=idx + 1,
113
+ error_source="key_value",
114
+ )
115
+
116
+ has_weights = any("weight" in message for message in messages)
117
+
118
+ previous_role = None
119
+ for message in messages:
120
+ if not isinstance(message, dict):
121
+ raise InvalidFileFormatError(
122
+ message=f"Invalid format on line {idx + 1} of the input file. "
123
+ f"Expected a dictionary in the messages list. Found {type(message)}",
124
+ line_number=idx + 1,
125
+ error_source="key_value",
126
+ )
127
+ for column in REQUIRED_COLUMNS_MESSAGE:
128
+ if column not in message:
129
+ raise InvalidFileFormatError(
130
+ message=f"Field `{column}` is missing for a turn `{message}` on line {idx + 1} "
131
+ "of the the input file.",
132
+ line_number=idx + 1,
133
+ error_source="key_value",
134
+ )
135
+ else:
136
+ if not isinstance(message[column], str):
137
+ raise InvalidFileFormatError(
138
+ message=f"Invalid format on line {idx + 1} in the column {column} for turn `{message}` "
139
+ f"of the input file. Expected string. Found {type(message[column])}",
140
+ line_number=idx + 1,
141
+ error_source="text_field",
142
+ )
143
+
144
+ if has_weights and "weight" in message:
145
+ weight = message["weight"]
146
+ if not isinstance(weight, int):
147
+ raise InvalidFileFormatError(
148
+ message="Weight must be an integer",
149
+ line_number=idx + 1,
150
+ error_source="key_value",
151
+ )
152
+ if weight not in {0, 1}:
153
+ raise InvalidFileFormatError(
154
+ message="Weight must be either 0 or 1",
155
+ line_number=idx + 1,
156
+ error_source="key_value",
157
+ )
158
+ if message["role"] not in POSSIBLE_ROLES_CONVERSATION:
159
+ raise InvalidFileFormatError(
160
+ message=f"Found invalid role `{message['role']}` in the messages on the line {idx + 1}. "
161
+ f"Possible roles in the conversation are: {POSSIBLE_ROLES_CONVERSATION}",
162
+ line_number=idx + 1,
163
+ error_source="key_value",
164
+ )
165
+
166
+ if previous_role == message["role"]:
167
+ raise InvalidFileFormatError(
168
+ message=f"Invalid role turns on line {idx + 1} of the input file. "
169
+ "`user` and `assistant` roles must alternate user/assistant/user/assistant/...",
170
+ line_number=idx + 1,
171
+ error_source="key_value",
172
+ )
173
+ previous_role = message["role"]
174
+
175
+
176
+ def validate_preference_openai(example: Dict[str, Any], idx: int = 0) -> None:
177
+ """Validate the OpenAI preference dataset format.
178
+
179
+ Args:
180
+ example (dict): Input entry to be checked.
181
+ idx (int): Line number in the file.
182
+
183
+ Raises:
184
+ InvalidFileFormatError: If the dataset format is invalid.
185
+ """
186
+ if not isinstance(example["input"], dict):
187
+ raise InvalidFileFormatError(
188
+ message="The dataset is malformed, the `input` field must be a dictionary.",
189
+ line_number=idx + 1,
190
+ error_source="key_value",
191
+ )
192
+
193
+ if "messages" not in example["input"]:
194
+ raise InvalidFileFormatError(
195
+ message="The dataset is malformed, the `input` dictionary must contain a `messages` field.",
196
+ line_number=idx + 1,
197
+ error_source="key_value",
198
+ )
199
+
200
+ validate_messages(example["input"]["messages"], idx)
201
+
202
+ for output_field in ["preferred_output", "non_preferred_output"]:
203
+ if not isinstance(example[output_field], list):
204
+ raise InvalidFileFormatError(
205
+ message=f"The dataset is malformed, the `{output_field}` field must be a list.",
206
+ line_number=idx + 1,
207
+ error_source="key_value",
208
+ )
209
+
210
+ if len(example[output_field]) != 1:
211
+ raise InvalidFileFormatError(
212
+ message=f"The dataset is malformed, the `{output_field}` list must contain exactly one message.",
213
+ line_number=idx + 1,
214
+ error_source="key_value",
215
+ )
216
+ if "role" not in example[output_field][0]:
217
+ raise InvalidFileFormatError(
218
+ message=f"The dataset is malformed, the `{output_field}` message is missing the `role` field.",
219
+ line_number=idx + 1,
220
+ error_source="key_value",
221
+ )
222
+ elif example[output_field][0]["role"] != "assistant":
223
+ raise InvalidFileFormatError(
224
+ message=f"The dataset is malformed, the `{output_field}` must contain an assistant message.",
225
+ line_number=idx + 1,
226
+ error_source="key_value",
227
+ )
228
+
229
+ validate_messages(example["preferred_output"], idx)
230
+ validate_messages(example["non_preferred_output"], idx)
231
+
232
+
99
233
  def _check_jsonl(file: Path) -> Dict[str, Any]:
100
234
  report_dict: Dict[str, Any] = {}
101
235
  # Check that the file is UTF-8 encoded. If not report where the error occurs.
@@ -164,74 +298,13 @@ def _check_jsonl(file: Path) -> Dict[str, Any]:
164
298
  line_number=idx + 1,
165
299
  error_source="format",
166
300
  )
167
-
168
- if current_format == DatasetFormat.CONVERSATION:
301
+ if current_format == DatasetFormat.PREFERENCE_OPENAI:
302
+ validate_preference_openai(json_line, idx)
303
+ elif current_format == DatasetFormat.CONVERSATION:
169
304
  message_column = JSONL_REQUIRED_COLUMNS_MAP[
170
305
  DatasetFormat.CONVERSATION
171
306
  ][0]
172
- if not isinstance(json_line[message_column], list):
173
- raise InvalidFileFormatError(
174
- message=f"Invalid format on line {idx + 1} of the input file. "
175
- f"Expected a list of messages. Found {type(json_line[message_column])}",
176
- line_number=idx + 1,
177
- error_source="key_value",
178
- )
179
-
180
- if len(json_line[message_column]) == 0:
181
- raise InvalidFileFormatError(
182
- message=f"Invalid format on line {idx + 1} of the input file. "
183
- f"Expected a non-empty list of messages. Found empty list",
184
- line_number=idx + 1,
185
- error_source="key_value",
186
- )
187
-
188
- for turn_id, turn in enumerate(json_line[message_column]):
189
- if not isinstance(turn, dict):
190
- raise InvalidFileFormatError(
191
- message=f"Invalid format on line {idx + 1} of the input file. "
192
- f"Expected a dictionary in the {turn_id + 1} turn. Found {type(turn)}",
193
- line_number=idx + 1,
194
- error_source="key_value",
195
- )
196
-
197
- previous_role = None
198
- for turn in json_line[message_column]:
199
- for column in REQUIRED_COLUMNS_MESSAGE:
200
- if column not in turn:
201
- raise InvalidFileFormatError(
202
- message=f"Field `{column}` is missing for a turn `{turn}` on line {idx + 1} "
203
- "of the the input file.",
204
- line_number=idx + 1,
205
- error_source="key_value",
206
- )
207
- else:
208
- if not isinstance(turn[column], str):
209
- raise InvalidFileFormatError(
210
- message=f"Invalid format on line {idx + 1} in the column {column} for turn `{turn}` "
211
- f"of the input file. Expected string. Found {type(turn[column])}",
212
- line_number=idx + 1,
213
- error_source="text_field",
214
- )
215
- role = turn["role"]
216
-
217
- if role not in POSSIBLE_ROLES_CONVERSATION:
218
- raise InvalidFileFormatError(
219
- message=f"Found invalid role `{role}` in the messages on the line {idx + 1}. "
220
- f"Possible roles in the conversation are: {POSSIBLE_ROLES_CONVERSATION}",
221
- line_number=idx + 1,
222
- error_source="key_value",
223
- )
224
-
225
- if previous_role == role:
226
- raise InvalidFileFormatError(
227
- message=f"Invalid role turns on line {idx + 1} of the input file. "
228
- "`user` and `assistant` roles must alternate user/assistant/user/assistant/...",
229
- line_number=idx + 1,
230
- error_source="key_value",
231
- )
232
-
233
- previous_role = role
234
-
307
+ validate_messages(json_line[message_column], idx)
235
308
  else:
236
309
  for column in JSONL_REQUIRED_COLUMNS_MAP[current_format]:
237
310
  if not isinstance(json_line[column], str):
together/utils/tools.py CHANGED
@@ -3,6 +3,8 @@ from __future__ import annotations
3
3
  import logging
4
4
  import os
5
5
  from datetime import datetime
6
+ import re
7
+ from typing import Any
6
8
 
7
9
 
8
10
  logger = logging.getLogger("together")
@@ -23,18 +25,67 @@ def normalize_key(key: str) -> str:
23
25
  return key.replace("/", "--").replace("_", "-").replace(" ", "-").lower()
24
26
 
25
27
 
26
- def parse_timestamp(timestamp: str) -> datetime:
28
+ def parse_timestamp(timestamp: str) -> datetime | None:
29
+ """Parse a timestamp string into a datetime object or None if the string is empty.
30
+
31
+ Args:
32
+ timestamp (str): Timestamp
33
+
34
+ Returns:
35
+ datetime | None: Parsed datetime, or None if the string is empty
36
+ """
37
+ if timestamp == "":
38
+ return None
39
+
27
40
  formats = ["%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%SZ"]
28
41
  for fmt in formats:
29
42
  try:
30
43
  return datetime.strptime(timestamp, fmt)
31
44
  except ValueError:
32
45
  continue
46
+
33
47
  raise ValueError("Timestamp does not match any expected format")
34
48
 
35
49
 
36
- # Convert fine-tune nano-dollar price to dollars
50
+ def format_timestamp(timestamp_str: str) -> str:
51
+ """Format timestamp to a readable date string.
52
+
53
+ Args:
54
+ timestamp: A timestamp string
55
+
56
+ Returns:
57
+ str: Formatted timestamp string (MM/DD/YYYY, HH:MM AM/PM)
58
+ """
59
+ timestamp = parse_timestamp(timestamp_str)
60
+ if timestamp is None:
61
+ return ""
62
+ return timestamp.strftime("%m/%d/%Y, %I:%M %p")
63
+
64
+
65
+ def get_event_step(event: Any) -> str | None:
66
+ """Extract the step number from a checkpoint event.
67
+
68
+ Args:
69
+ event: A checkpoint event object
70
+
71
+ Returns:
72
+ str | None: The step number as a string, or None if not found
73
+ """
74
+ step = getattr(event, "step", None)
75
+ if step is not None:
76
+ return str(step)
77
+ return None
78
+
79
+
37
80
  def finetune_price_to_dollars(price: float) -> float:
81
+ """Convert fine-tuning job price to dollars
82
+
83
+ Args:
84
+ price (float): Fine-tuning job price in billing units
85
+
86
+ Returns:
87
+ float: Price in dollars
88
+ """
38
89
  return price / NANODOLLAR
39
90
 
40
91
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: together
3
- Version: 1.4.0
3
+ Version: 1.4.4
4
4
  Summary: Python client for Together's Cloud Platform!
5
5
  License: Apache-2.0
6
6
  Author: Together AI
@@ -87,25 +87,101 @@ This repo contains both a Python Library and a CLI. We'll demonstrate how to use
87
87
  ### Chat Completions
88
88
 
89
89
  ```python
90
- import os
91
90
  from together import Together
92
91
 
93
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
92
+ client = Together()
94
93
 
94
+ # Simple text message
95
95
  response = client.chat.completions.create(
96
96
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
97
97
  messages=[{"role": "user", "content": "tell me about new york"}],
98
98
  )
99
99
  print(response.choices[0].message.content)
100
+
101
+ # Multi-modal message with text and image
102
+ response = client.chat.completions.create(
103
+ model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
104
+ messages=[{
105
+ "role": "user",
106
+ "content": [
107
+ {
108
+ "type": "text",
109
+ "text": "What's in this image?"
110
+ },
111
+ {
112
+ "type": "image_url",
113
+ "image_url": {
114
+ "url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
115
+ }
116
+ }
117
+ ]
118
+ }]
119
+ )
120
+ print(response.choices[0].message.content)
121
+
122
+ # Multi-modal message with multiple images
123
+ response = client.chat.completions.create(
124
+ model="Qwen/Qwen2.5-VL-72B-Instruct",
125
+ messages=[{
126
+ "role": "user",
127
+ "content": [
128
+ {
129
+ "type": "text",
130
+ "text": "Compare these two images."
131
+ },
132
+ {
133
+ "type": "image_url",
134
+ "image_url": {
135
+ "url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
136
+ }
137
+ },
138
+ {
139
+ "type": "image_url",
140
+ "image_url": {
141
+ "url": "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/slack.png"
142
+ }
143
+ }
144
+ ]
145
+ }]
146
+ )
147
+ print(response.choices[0].message.content)
148
+
149
+ # Multi-modal message with text and video
150
+ response = client.chat.completions.create(
151
+ model="Qwen/Qwen2.5-VL-72B-Instruct",
152
+ messages=[{
153
+ "role": "user",
154
+ "content": [
155
+ {
156
+ "type": "text",
157
+ "text": "What's happening in this video?"
158
+ },
159
+ {
160
+ "type": "video_url",
161
+ "video_url": {
162
+ "url": "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4"
163
+ }
164
+ }
165
+ ]
166
+ }]
167
+ )
168
+ print(response.choices[0].message.content)
100
169
  ```
101
170
 
171
+ The chat completions API supports three types of content:
172
+ - Plain text messages using the `content` field directly
173
+ - Multi-modal messages with images using `type: "image_url"`
174
+ - Multi-modal messages with videos using `type: "video_url"`
175
+
176
+ When using multi-modal content, the `content` field becomes an array of content objects, each with its own type and corresponding data.
177
+
102
178
  #### Streaming
103
179
 
104
180
  ```python
105
181
  import os
106
182
  from together import Together
107
183
 
108
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
184
+ client = Together()
109
185
  stream = client.chat.completions.create(
110
186
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
111
187
  messages=[{"role": "user", "content": "tell me about new york"}],
@@ -119,17 +195,17 @@ for chunk in stream:
119
195
  #### Async usage
120
196
 
121
197
  ```python
122
- import os, asyncio
198
+ import asyncio
123
199
  from together import AsyncTogether
124
200
 
125
- async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
201
+ async_client = AsyncTogether()
126
202
  messages = [
127
203
  "What are the top things to do in San Francisco?",
128
204
  "What country is Paris in?",
129
205
  ]
130
206
 
131
207
  async def async_chat_completion(messages):
132
- async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
208
+ async_client = AsyncTogether()
133
209
  tasks = [
134
210
  async_client.chat.completions.create(
135
211
  model="mistralai/Mixtral-8x7B-Instruct-v0.1",
@@ -150,10 +226,9 @@ asyncio.run(async_chat_completion(messages))
150
226
  Completions are for code and language models shown [here](https://docs.together.ai/docs/inference-models). Below, a code model example is shown.
151
227
 
152
228
  ```python
153
- import os
154
229
  from together import Together
155
230
 
156
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
231
+ client = Together()
157
232
 
158
233
  response = client.completions.create(
159
234
  model="codellama/CodeLlama-34b-Python-hf",
@@ -166,10 +241,9 @@ print(response.choices[0].text)
166
241
  #### Streaming
167
242
 
168
243
  ```python
169
- import os
170
244
  from together import Together
171
245
 
172
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
246
+ client = Together()
173
247
  stream = client.completions.create(
174
248
  model="codellama/CodeLlama-34b-Python-hf",
175
249
  prompt="Write a Next.js component with TailwindCSS for a header component.",
@@ -183,10 +257,10 @@ for chunk in stream:
183
257
  #### Async usage
184
258
 
185
259
  ```python
186
- import os, asyncio
260
+ import asyncio
187
261
  from together import AsyncTogether
188
262
 
189
- async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
263
+ async_client = AsyncTogether()
190
264
  prompts = [
191
265
  "Write a Next.js component with TailwindCSS for a header component.",
192
266
  "Write a python function for the fibonacci sequence",
@@ -211,10 +285,9 @@ asyncio.run(async_chat_completion(prompts))
211
285
  ### Image generation
212
286
 
213
287
  ```python
214
- import os
215
288
  from together import Together
216
289
 
217
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
290
+ client = Together()
218
291
 
219
292
  response = client.images.generate(
220
293
  prompt="space robots",
@@ -231,7 +304,7 @@ print(response.data[0].b64_json)
231
304
  from typing import List
232
305
  from together import Together
233
306
 
234
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
307
+ client = Together()
235
308
 
236
309
  def get_embeddings(texts: List[str], model: str) -> List[List[float]]:
237
310
  texts = [text.replace("\n", " ") for text in texts]
@@ -250,7 +323,7 @@ print(embeddings)
250
323
  from typing import List
251
324
  from together import Together
252
325
 
253
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
326
+ client = Together()
254
327
 
255
328
  def get_reranked_documents(query: str, documents: List[str], model: str, top_n: int = 3) -> List[str]:
256
329
  outputs = client.rerank.create(model=model, query=query, documents=documents, top_n=top_n)
@@ -272,10 +345,9 @@ Read more about Reranking [here](https://docs.together.ai/docs/rerank-overview).
272
345
  The files API is used for fine-tuning and allows developers to upload data to fine-tune on. It also has several methods to list all files, retrieve files, and delete files. Please refer to our fine-tuning docs [here](https://docs.together.ai/docs/fine-tuning-python).
273
346
 
274
347
  ```python
275
- import os
276
348
  from together import Together
277
349
 
278
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
350
+ client = Together()
279
351
 
280
352
  client.files.upload(file="somedata.jsonl") # uploads a file
281
353
  client.files.list() # lists all uploaded files
@@ -289,10 +361,9 @@ client.files.delete(id="file-d0d318cb-b7d9-493a-bd70-1cfe089d3815") # deletes a
289
361
  The finetune API is used for fine-tuning and allows developers to create finetuning jobs. It also has several methods to list all jobs, retrieve statuses and get checkpoints. Please refer to our fine-tuning docs [here](https://docs.together.ai/docs/fine-tuning-python).
290
362
 
291
363
  ```python
292
- import os
293
364
  from together import Together
294
365
 
295
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
366
+ client = Together()
296
367
 
297
368
  client.fine_tuning.create(
298
369
  training_file = 'file-d0d318cb-b7d9-493a-bd70-1cfe089d3815',
@@ -316,10 +387,9 @@ client.fine_tuning.download(id="ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b") # down
316
387
  This lists all the models that Together supports.
317
388
 
318
389
  ```python
319
- import os
320
390
  from together import Together
321
391
 
322
- client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
392
+ client = Together()
323
393
 
324
394
  models = client.models.list()
325
395
 
@@ -1,62 +1,65 @@
1
1
  together/__init__.py,sha256=B8T7ybZ7D6jJNRTuFDVjOFlImCNag8tNZXpZdXz7xNM,1530
2
2
  together/abstract/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
- together/abstract/api_requestor.py,sha256=uIIj6D0wWlicm-KXopl4osT6ycE7hiPPXFkmYTSFDqw,25974
3
+ together/abstract/api_requestor.py,sha256=kKVxkJqpd1CQ4t9Ky4kngkvlzZh1xoDN0PBAM8mGW_Q,25948
4
4
  together/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
5
  together/cli/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
6
  together/cli/api/chat.py,sha256=2PHRb-9T-lUEKhUJFtc7SxJv3shCVx40gq_8pzfsewM,9234
7
7
  together/cli/api/completions.py,sha256=l-Zw5t7hojL3w8xd_mitS2NRB72i5Z0xwkzH0rT5XMc,4263
8
+ together/cli/api/endpoints.py,sha256=LUIuK4DLs-VYor1nvOPzUNq0WeA7nIgIBHBD5Erdd5I,12470
8
9
  together/cli/api/files.py,sha256=QLYEXRkY8J2Gg1SbTCtzGfoTMvosoeACNK83L_oLubs,3397
9
- together/cli/api/finetune.py,sha256=FWHENGE86oLNCVXEJN9AAU3FlSTtnO96aShhB2xVrsk,12923
10
+ together/cli/api/finetune.py,sha256=0Md5FOzl0D6QfAmku628CGy43VzsjJ9-RbtY6ln5W1g,15018
10
11
  together/cli/api/images.py,sha256=GADSeaNUHUVMtWovmccGuKc28IJ9E_v4vAEwYHJhu5o,2645
11
12
  together/cli/api/models.py,sha256=xWEzu8ZpxM_Pz9KEjRPRVuv_v22RayYZ4QcgiezT5tE,1126
12
13
  together/cli/api/utils.py,sha256=IuqYWPnLI38_Bqd7lj8V_SnGdYc59pRmMbQmciS4FsM,1326
13
- together/cli/cli.py,sha256=RC0tgapkSOFjsRPg8p-8dx9D2LDzm8YmVCHUjk_aVyQ,1977
14
- together/client.py,sha256=EBeec9J9epkAZakm0GcZHmWjchEHOzT24XYqt6KZfwI,5023
15
- together/constants.py,sha256=0L2R8ftvls9eywQstSsrQcpHIkYsOo473vGw0okArN4,1359
16
- together/error.py,sha256=emjhTSsLwiZvW0v1EmYemjacCMtcFIKAXWWK_2IdP18,5419
14
+ together/cli/cli.py,sha256=YCDzbXpC5is0rs2PEkUPrIhYuzdyrihQ8GVR_TlDv5s,2054
15
+ together/client.py,sha256=vOe9NOgDyDlrT5ppvNfJGzdOHnMWEPmJX2RbXUQXKno,5081
16
+ together/constants.py,sha256=UDJhEylJFmdm4bedBDpvqYXBj5Or3k7z9GWtkRY_dZQ,1526
17
+ together/error.py,sha256=HU6247CyzCFjaxL9A0XYbXZ6fY_ebRg0FEYjI4Skogs,5515
17
18
  together/filemanager.py,sha256=QHhBn73oVFdgUpSYXYLmJzHJ9c5wYEMJC0ur6ZgDeYo,11269
18
19
  together/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
19
20
  together/legacy/base.py,sha256=ehrX1SCfRbK5OA83wL1q7-tfF-yuZOUxzjxYfFtdvvQ,727
20
21
  together/legacy/complete.py,sha256=NRJX-vjnkg4HrgDo9LS3jFfhwfXpeGxcl24dcrLPK3A,2439
21
22
  together/legacy/embeddings.py,sha256=nyTERjyPLTm7Sc987a9FJt1adnW7gIa7xs2CwXLE9EI,635
22
23
  together/legacy/files.py,sha256=qmAqMiNTPWb6WvLV5Tsv6kxGRfQ31q7OkHZNFwkw8v0,4082
23
- together/legacy/finetune.py,sha256=LENaqegeb1PszXDbAhTNPro7T3isz6X_IICIOKH7dKE,5114
24
+ together/legacy/finetune.py,sha256=XjZ4Dn2hSjMUVm64s6u1bbh9F7r9GbDKp-WLmzyEKRw,5123
24
25
  together/legacy/images.py,sha256=bJJRs-6C7-NexPyaeyHiYlHOU51yls5-QAiqtO4xrZU,626
25
26
  together/legacy/models.py,sha256=85ZN9Ids_FjdYNDRv5k7sgrtVWPKPHqkDplORtVUGHg,1087
26
- together/resources/__init__.py,sha256=FCsSApph3FrNXB8RXv3smO6Xr86l7SmqtrVcUtKtyCI,878
27
+ together/resources/__init__.py,sha256=OQ8tW9mUIX0Ezk0wvYEnnEym6wGsjBKgXFLU9Ffgb-o,984
27
28
  together/resources/audio/__init__.py,sha256=e7xp0Lkp_nMAHXcuFHS7dLXP_YqTPMMZIilW1TW_sAI,551
28
29
  together/resources/audio/speech.py,sha256=81ib_gIo-Rxoaipx2Pi9ZsKnOTjeFPwSlBrcUkyX5xk,5211
29
30
  together/resources/chat/__init__.py,sha256=RsTptdP8MeGjcdIjze896-J27cRvCbUoMft0X2BVlQ8,617
30
31
  together/resources/chat/completions.py,sha256=jYiNZsWa8RyEacL0VgxWj1egJ857oU4nxIY8uqGHcaU,14459
31
32
  together/resources/completions.py,sha256=5Wa-ZjPCxRcam6CDe7KgGYlTA7yJZMmd5TrRgGCL_ug,11726
32
33
  together/resources/embeddings.py,sha256=PTvLb82yjG_-iQOyuhsilp77Fr7gZ0o6WD2KeRnKoxs,2675
34
+ together/resources/endpoints.py,sha256=tk_Ih94F9CXDmdRqsmOHS4yedmyxiUfIjFodh6pbCl8,15865
33
35
  together/resources/files.py,sha256=bnPbaF25e4InBRPvHwXHXT-oSX1Z1sZRsnQW5wq82U4,4990
34
- together/resources/finetune.py,sha256=0UiN2jxxV_lQ9QSFKDjAioXVgPCIzb7biIJbcQj1oq4,26998
36
+ together/resources/finetune.py,sha256=euTGbSlFb7fIoRWGD4bc6Q-PKlXkOW7cAbfZALS4DTU,32945
35
37
  together/resources/images.py,sha256=LQUjKPaFxWTqOAPnyF1Pp7Rz4NLOYhmoKwshpYiprEM,4923
36
38
  together/resources/models.py,sha256=2dtHhXAqTDOOpwSbYLzWcKTC0-m2Szlb7LDYvp7Jr4w,1786
37
39
  together/resources/rerank.py,sha256=3Ju_aRSyZ1s_3zCSNZnSnEJErUVmt2xa3M8z1nvejMA,3931
38
40
  together/together_response.py,sha256=a3dgKMPDrlfKQwxYENfNt2T4l2vSZxRWMixhHSy-q3E,1308
39
- together/types/__init__.py,sha256=TL_bbZwAu99S1A18j0arV2vIXEbtHRP8IddmSXx1NT0,2285
41
+ together/types/__init__.py,sha256=edHguHW7OeCPZZWts80Uw6mF406rPzWAcoCQLueO1_0,2552
40
42
  together/types/abstract.py,sha256=1lFQI_3WjsR_t1128AeKW0aTk6EiM6Gh1J3ZuyLLPao,642
41
43
  together/types/audio_speech.py,sha256=jlj8BZf3dkIDARF1P11fuenVLj4try8Yx4RN-EAkhOU,2609
42
- together/types/chat_completions.py,sha256=tIHQzB1N1DsUl3WojsrfErqxVmcI_eweGVp_gbf6dp8,4914
44
+ together/types/chat_completions.py,sha256=ggwt1LlBXTB_hZKbtLsjg8j-gXxO8pUUQfTrxUmRXHU,5078
43
45
  together/types/common.py,sha256=kxZ-N9xtBsGYZBmbIWnZ0rfT3Pn8PFB7sAbp3iv96pw,1525
44
46
  together/types/completions.py,sha256=o3FR5ixsTUj-a3pmOUzbSQg-hESVhpqrC9UD__VCqr4,2971
45
47
  together/types/embeddings.py,sha256=J7grkYYn7xhqeKaBO2T-8XQRtHhkzYzymovtGdIUK5A,751
48
+ together/types/endpoints.py,sha256=EzNhHOoQ_D9fUdNQtxQPeSWiFzdFLqpNodN0YLmv_h0,4393
46
49
  together/types/error.py,sha256=OVlCs3cx_2WhZK4JzHT8SQyRIIqKOP1AZQ4y1PydjAE,370
47
50
  together/types/files.py,sha256=-rEUfsV6f2vZB9NrFxT4_933ubsDIUNkPB-3OlOFk4A,1954
48
- together/types/finetune.py,sha256=u4rZne7dd0F3jfQ9iXxIVG405kfr65rlJiEMkEZrfWY,9052
51
+ together/types/finetune.py,sha256=rsmzxUF2gEh6KzlxoagkuUEiJz1gHDwuRgZnmotyQ1k,9994
49
52
  together/types/images.py,sha256=xnC-FZGdZU30WSFTybfGneWxb-kj0ZGufJsgHtB8j0k,980
50
53
  together/types/models.py,sha256=nwQIZGHKZpX9I6mK8z56VW70YC6Ry6JGsVa0s99QVxc,1055
51
54
  together/types/rerank.py,sha256=qZfuXOn7MZ6ly8hpJ_MZ7OU_Bi1-cgYNSB20Wja8Qkk,1061
52
- together/utils/__init__.py,sha256=n1kmLiaExT9YOKT5ye--dC4tW2qcHeicKX0GR86U640,698
55
+ together/utils/__init__.py,sha256=5fqvj4KT2rHxKSQot2TSyV_HcvkvkGiqAiaYuJwqtm0,786
53
56
  together/utils/_log.py,sha256=5IYNI-jYzxyIS-pUvhb0vE_Muo3MA7GgBhsu66TKP2w,1951
54
57
  together/utils/api_helpers.py,sha256=RSF7SRhbjHzroMOSWAXscflByM1r1ta_1SpxkAT22iE,2407
55
- together/utils/files.py,sha256=4SxxrTYfVoWvsD0n7O73LVjexAxYCWvXUBgmzrJY5-s,14169
56
- together/utils/tools.py,sha256=3-lXWP3cBCzOVSZg9tr5zOT1jaVeKAKVWxO2fcXZTh8,1788
58
+ together/utils/files.py,sha256=rfp10qU0urtWOXXFeasFtO9xp-1KIhM3S43JxcnHmL0,16438
59
+ together/utils/tools.py,sha256=H2MTJhEqtBllaDvOyZehIO_IVNK3P17rSDeILtJIVag,2964
57
60
  together/version.py,sha256=p03ivHyE0SyWU4jAnRTBi_sOwywVWoZPU4g2gzRgG-Y,126
58
- together-1.4.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
59
- together-1.4.0.dist-info/METADATA,sha256=v1LquKItktOESHCPhDiIBR9eFXwCcTJQZYGNihiSJ6g,12649
60
- together-1.4.0.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
61
- together-1.4.0.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
62
- together-1.4.0.dist-info/RECORD,,
61
+ together-1.4.4.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
62
+ together-1.4.4.dist-info/METADATA,sha256=ICtNOO5v35bKJFEtlgdVg4ORP0ofYO11vANs-QuowxY,14445
63
+ together-1.4.4.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
64
+ together-1.4.4.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
65
+ together-1.4.4.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 2.0.1
2
+ Generator: poetry-core 2.1.1
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any