together-1.5.20-py3-none-any.whl → together-1.5.23-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
together/utils/files.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import json
 import os
+import csv
 from pathlib import Path
 from traceback import format_exc
 from typing import Any, Dict, List
@@ -17,6 +18,7 @@ from together.constants import (
     POSSIBLE_ROLES_CONVERSATION,
     DatasetFormat,
 )
+from together.types import FilePurpose
 
 
 class InvalidFileFormatError(ValueError):
@@ -36,6 +38,7 @@ class InvalidFileFormatError(ValueError):
 
 
 def check_file(
     file: Path | str,
+    purpose: FilePurpose | str = FilePurpose.FineTune,
 ) -> Dict[str, Any]:
     if not isinstance(file, Path):
         file = Path(file)
@@ -52,6 +55,7 @@ def check_file(
         "has_min_samples": None,
         "num_samples": None,
         "load_json": None,
+        "load_csv": None,
     }
 
     if not file.is_file():
@@ -79,10 +83,13 @@ def check_file(
     data_report_dict = {}
     if file.suffix == ".jsonl":
         report_dict["filetype"] = "jsonl"
-        data_report_dict = _check_jsonl(file)
+        data_report_dict = _check_jsonl(file, purpose)
     elif file.suffix == ".parquet":
         report_dict["filetype"] = "parquet"
-        data_report_dict = _check_parquet(file)
+        data_report_dict = _check_parquet(file, purpose)
+    elif file.suffix == ".csv":
+        report_dict["filetype"] = "csv"
+        data_report_dict = _check_csv(file, purpose)
     else:
         report_dict["filetype"] = (
             f"Unknown extension of file {file}. "
@@ -229,9 +236,15 @@ def validate_preference_openai(example: Dict[str, Any], idx: int = 0) -> None:
     validate_messages(example["non_preferred_output"], idx)
 
 
-def _check_jsonl(file: Path) -> Dict[str, Any]:
+def _check_utf8(file: Path) -> Dict[str, Any]:
+    """Check if the file is UTF-8 encoded.
+
+    Args:
+        file (Path): Path to the file to check.
+    Returns:
+        Dict[str, Any]: A dictionary with the results of the check.
+    """
     report_dict: Dict[str, Any] = {}
-    # Check that the file is UTF-8 encoded. If not report where the error occurs.
     try:
         with file.open(encoding="utf-8") as f:
             f.read()
@@ -240,6 +253,99 @@ def _check_jsonl(file: Path) -> Dict[str, Any]:
         report_dict["utf8"] = False
         report_dict["message"] = f"File is not UTF-8 encoded. Error raised: {e}."
         report_dict["is_check_passed"] = False
+    return report_dict
+
+
+def _check_samples_count(
+    file: Path, report_dict: Dict[str, Any], idx: int
+) -> Dict[str, Any]:
+    if idx + 1 < MIN_SAMPLES:
+        report_dict["has_min_samples"] = False
+        report_dict["message"] = (
+            f"Processing {file} resulted in only {idx + 1} samples. "
+            f"Our minimum is {MIN_SAMPLES} samples. "
+        )
+        report_dict["is_check_passed"] = False
+    else:
+        report_dict["num_samples"] = idx + 1
+        report_dict["has_min_samples"] = True
+
+    return report_dict
+
+
+def _check_csv(file: Path, purpose: FilePurpose | str) -> Dict[str, Any]:
+    """Check if the file is a valid CSV file.
+
+    Args:
+        file (Path): Path to the file to check.
+        purpose (FilePurpose | str): Purpose of the file, used to determine if the file should be checked for specific columns.
+
+    Returns:
+        Dict[str, Any]: A dictionary with the results of the check.
+    """
+    report_dict: Dict[str, Any] = {}
+    if purpose != FilePurpose.Eval:
+        report_dict["is_check_passed"] = False
+        report_dict["message"] = (
+            f"CSV files are not supported for {purpose}. "
+            "Only JSONL and Parquet files are supported."
+        )
+        return report_dict
+
+    report_dict.update(_check_utf8(file))
+
+    if not report_dict["utf8"]:
+        return report_dict
+
+    with file.open() as f:
+        reader = csv.DictReader(f)
+        if not reader.fieldnames:
+            report_dict["message"] = "CSV file is empty or has no header."
+            report_dict["is_check_passed"] = False
+            return report_dict
+        idx = -1
+
+        try:
+            # for loop to iterate through the CSV rows
+            for idx, item in enumerate(reader):
+                if None in item.keys() or None in item.values():
+                    raise InvalidFileFormatError(
+                        message=f"CSV file is malformed or the number of columns found on line {idx + 1} is inconsistent with the header",
+                        line_number=idx + 1,
+                        error_source="format",
+                    )
+
+            report_dict.update(_check_samples_count(file, report_dict, idx))
+            report_dict["load_csv"] = True
+
+        except InvalidFileFormatError as e:
+            report_dict["load_csv"] = False
+            report_dict["is_check_passed"] = False
+            report_dict["message"] = e.message
+            if e.line_number is not None:
+                report_dict["line_number"] = e.line_number
+            if e.error_source is not None:
+                report_dict[e.error_source] = False
+        except ValueError:
+            report_dict["load_csv"] = False
+            if idx < 0:
+                report_dict["message"] = (
+                    "Unable to decode file. "
+                    "File may be empty or in an unsupported format. "
+                )
+            else:
+                report_dict["message"] = (
+                    f"Error parsing the CSV file. Unexpected format on line {idx + 1}."
+                )
+            report_dict["is_check_passed"] = False
+
+    return report_dict
+
+
+def _check_jsonl(file: Path, purpose: FilePurpose | str) -> Dict[str, Any]:
+    report_dict: Dict[str, Any] = {}
+    report_dict.update(_check_utf8(file))
+    if not report_dict["utf8"]:
         return report_dict
 
     dataset_format = None
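
The malformed-row test in `_check_csv` (`None in item.keys() or None in item.values()`) leans on documented `csv.DictReader` behavior: with the default `restkey`/`restval` of `None`, a row with more fields than the header collects the extras in a list under the key `None`, and a row with fewer fields fills the missing columns with the value `None`. A self-contained illustration using only the standard library:

```python
import csv
import io

# Header declares two columns; row 1 has an extra field, row 2 is short.
raw = "prompt,completion\na,b,EXTRA\nc\n"
reader = csv.DictReader(io.StringIO(raw))

for idx, item in enumerate(reader):
    # Extra fields land under the None key; missing fields become None values.
    malformed = None in item.keys() or None in item.values()
    print(idx + 1, dict(item), "malformed" if malformed else "ok")

# line 1: {'prompt': 'a', 'completion': 'b', None: ['EXTRA']} -> malformed
# line 2: {'prompt': 'c', 'completion': None} -> malformed
```
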
@@ -259,84 +365,75 @@ def _check_jsonl(file: Path) -> Dict[str, Any]:
                         line_number=idx + 1,
                         error_source="line_type",
                     )
-
-                current_format = None
-                for possible_format in JSONL_REQUIRED_COLUMNS_MAP:
-                    if all(
-                        column in json_line
-                        for column in JSONL_REQUIRED_COLUMNS_MAP[possible_format]
-                    ):
-                        if current_format is None:
-                            current_format = possible_format
-                        elif current_format != possible_format:
-                            raise InvalidFileFormatError(
-                                message="Found multiple dataset formats in the input file. "
-                                f"Got {current_format} and {possible_format} on line {idx + 1}.",
-                                line_number=idx + 1,
-                                error_source="format",
-                            )
-
-                        # Check that there are no extra columns
-                        for column in json_line:
-                            if (
-                                column
-                                not in JSONL_REQUIRED_COLUMNS_MAP[possible_format]
-                            ):
+                # In evals, we don't check the format of the dataset.
+                if purpose != FilePurpose.Eval:
+                    current_format = None
+                    for possible_format in JSONL_REQUIRED_COLUMNS_MAP:
+                        if all(
+                            column in json_line
+                            for column in JSONL_REQUIRED_COLUMNS_MAP[possible_format]
+                        ):
+                            if current_format is None:
+                                current_format = possible_format
+                            elif current_format != possible_format:
                                 raise InvalidFileFormatError(
-                                    message=f'Found extra column "{column}" in the line {idx + 1}.',
+                                    message="Found multiple dataset formats in the input file. "
+                                    f"Got {current_format} and {possible_format} on line {idx + 1}.",
                                     line_number=idx + 1,
                                     error_source="format",
                                 )
 
-                if current_format is None:
-                    raise InvalidFileFormatError(
-                        message=(
-                            f"Error parsing file. Could not detect a format for the line {idx + 1} with the columns:\n"
-                            f"{json_line.keys()}"
-                        ),
-                        line_number=idx + 1,
-                        error_source="format",
-                    )
-                if current_format == DatasetFormat.PREFERENCE_OPENAI:
-                    validate_preference_openai(json_line, idx)
-                elif current_format == DatasetFormat.CONVERSATION:
-                    message_column = JSONL_REQUIRED_COLUMNS_MAP[
-                        DatasetFormat.CONVERSATION
-                    ][0]
-                    validate_messages(json_line[message_column], idx)
-                else:
-                    for column in JSONL_REQUIRED_COLUMNS_MAP[current_format]:
-                        if not isinstance(json_line[column], str):
-                            raise InvalidFileFormatError(
-                                message=f'Invalid value type for "{column}" key on line {idx + 1}. '
-                                f"Expected string. Found {type(json_line[column])}.",
-                                line_number=idx + 1,
-                                error_source="key_value",
-                            )
-
-                if dataset_format is None:
-                    dataset_format = current_format
-                elif current_format is not None:
-                    if current_format != dataset_format:
+                            # Check that there are no extra columns
+                            for column in json_line:
+                                if (
+                                    column
+                                    not in JSONL_REQUIRED_COLUMNS_MAP[possible_format]
+                                ):
+                                    raise InvalidFileFormatError(
+                                        message=f'Found extra column "{column}" in the line {idx + 1}.',
+                                        line_number=idx + 1,
+                                        error_source="format",
+                                    )
+
+                    if current_format is None:
                         raise InvalidFileFormatError(
-                            message="All samples in the dataset must have the same dataset format. "
-                            f"Got {dataset_format} for the first line and {current_format} "
-                            f"for the line {idx + 1}.",
+                            message=(
+                                f"Error parsing file. Could not detect a format for the line {idx + 1} with the columns:\n"
+                                f"{json_line.keys()}"
+                            ),
                             line_number=idx + 1,
                             error_source="format",
                         )
+                    if current_format == DatasetFormat.PREFERENCE_OPENAI:
+                        validate_preference_openai(json_line, idx)
+                    elif current_format == DatasetFormat.CONVERSATION:
+                        message_column = JSONL_REQUIRED_COLUMNS_MAP[
+                            DatasetFormat.CONVERSATION
+                        ][0]
+                        validate_messages(json_line[message_column], idx)
+                    else:
+                        for column in JSONL_REQUIRED_COLUMNS_MAP[current_format]:
+                            if not isinstance(json_line[column], str):
+                                raise InvalidFileFormatError(
+                                    message=f'Invalid value type for "{column}" key on line {idx + 1}. '
+                                    f"Expected string. Found {type(json_line[column])}.",
+                                    line_number=idx + 1,
+                                    error_source="key_value",
+                                )
 
-            if idx + 1 < MIN_SAMPLES:
-                report_dict["has_min_samples"] = False
-                report_dict["message"] = (
-                    f"Processing {file} resulted in only {idx + 1} samples. "
-                    f"Our minimum is {MIN_SAMPLES} samples. "
-                )
-                report_dict["is_check_passed"] = False
-            else:
-                report_dict["num_samples"] = idx + 1
-                report_dict["has_min_samples"] = True
-                report_dict["is_check_passed"] = True
+                if dataset_format is None:
+                    dataset_format = current_format
+                elif current_format is not None:
+                    if current_format != dataset_format:
+                        raise InvalidFileFormatError(
+                            message="All samples in the dataset must have the same dataset format. "
+                            f"Got {dataset_format} for the first line and {current_format} "
+                            f"for the line {idx + 1}.",
+                            line_number=idx + 1,
+                            error_source="format",
+                        )
+
+            report_dict.update(_check_samples_count(file, report_dict, idx))
 
     report_dict["load_json"] = True
 
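
The net effect of the restructuring above: per-line schema detection now runs only when the purpose is not `FilePurpose.Eval`, so an eval JSONL with arbitrary columns is no longer rejected for failing format detection. A hedged sketch (file name and rows are illustrative; the full report dicts are printed rather than asserting individual keys):

```python
import json
from pathlib import Path

from together.types import FilePurpose
from together.utils.files import check_file

# Rows whose columns match none of the known fine-tuning schemas.
rows = [{"question": "2+2?", "expected": "4"} for _ in range(100)]
Path("eval_data.jsonl").write_text(
    "\n".join(json.dumps(row) for row in rows) + "\n"
)

# Fine-tuning purpose: format detection fails on line 1
# ("Could not detect a format ..."), so the check does not pass.
print(check_file("eval_data.jsonl"))

# Eval purpose: schema detection is skipped; only UTF-8 validity,
# JSON parsing, and the minimum sample count are still enforced.
print(check_file("eval_data.jsonl", purpose=FilePurpose.Eval))
```
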
@@ -370,7 +467,7 @@ def _check_jsonl(file: Path) -> Dict[str, Any]:
     return report_dict
 
 
-def _check_parquet(file: Path) -> Dict[str, Any]:
+def _check_parquet(file: Path, purpose: FilePurpose | str) -> Dict[str, Any]:
     try:
         # Pyarrow is optional as it's large (~80MB) and isn't compatible with older systems.
         from pyarrow import ArrowInvalid, parquet
@@ -380,6 +477,13 @@ def _check_parquet(file: Path) -> Dict[str, Any]:
         )
 
     report_dict: Dict[str, Any] = {}
+    if purpose == FilePurpose.Eval:
+        report_dict["is_check_passed"] = False
+        report_dict["message"] = (
+            f"Parquet files are not supported for {purpose}. "
+            "Only JSONL and CSV files are supported."
+        )
+        return report_dict
 
     try:
         table = parquet.read_table(str(file), memory_map=True)
@@ -399,6 +503,7 @@ def _check_parquet(file: Path) -> Dict[str, Any]:
         report_dict["is_check_passed"] = False
         return report_dict
 
+    # Don't check for eval
    for column_name in column_names:
        if column_name not in PARQUET_EXPECTED_COLUMNS:
            report_dict["load_parquet"] = (
together-1.5.20.dist-info/METADATA → together-1.5.23.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: together
-Version: 1.5.20
+Version: 1.5.23
 Summary: Python client for Together's Cloud Platform!
 License: Apache-2.0
 Author: Together AI
@@ -421,6 +421,33 @@ for model in models:
     print(model)
 ```
 
+### Batch Inference
+
+The batch API allows you to submit larger inference jobs for completion with a 24-hour turnaround time; an example is below. To learn more, refer to the [docs here](https://docs.together.ai/docs/batch-inference).
+
+```python
+from together import Together
+
+client = Together()
+
+# Upload the batch file
+batch_file = client.files.upload(file="simpleqa_batch_student.jsonl", purpose="batch-api")
+
+# Create the batch job
+batch = client.batches.create_batch(file_id=batch_file.id, endpoint="/v1/chat/completions")
+
+# Monitor the batch status
+batch_stat = client.batches.get_batch(batch.id)
+
+# List all batches - contains other batches as well
+client.batches.list_batches()
+
+# Download the file content if job completed
+if batch_stat.status == 'COMPLETED':
+    output_response = client.files.retrieve_content(id=batch_stat.output_file_id,
+                                                    output="simpleqa_v3_output.jsonl")
+```
+
 ## Usage – CLI
 
 ### Chat Completions
together-1.5.20.dist-info/RECORD → together-1.5.23.dist-info/RECORD
@@ -6,43 +6,47 @@ together/cli/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
 together/cli/api/chat.py,sha256=2PHRb-9T-lUEKhUJFtc7SxJv3shCVx40gq_8pzfsewM,9234
 together/cli/api/completions.py,sha256=l-Zw5t7hojL3w8xd_mitS2NRB72i5Z0xwkzH0rT5XMc,4263
 together/cli/api/endpoints.py,sha256=f6KafWZvRF6n_ThWdr3y9uhE6wPF37PcD45w_EtgXmY,13289
+together/cli/api/evaluation.py,sha256=reu8LRUDqDBf9mCwbbD_kETyB4PdokvA5mc792iIrSU,11367
 together/cli/api/files.py,sha256=QLYEXRkY8J2Gg1SbTCtzGfoTMvosoeACNK83L_oLubs,3397
-together/cli/api/finetune.py,sha256=vIAvHQ8K6AxqJn2aqxd2ZPb1ZicLeb509_LpD4A9Thw,17517
+together/cli/api/finetune.py,sha256=bXvkI1oxaEHOKlzHFzdTQv6G39qX22lZ8L4IRpJ5uZU,16832
 together/cli/api/images.py,sha256=GADSeaNUHUVMtWovmccGuKc28IJ9E_v4vAEwYHJhu5o,2645
 together/cli/api/models.py,sha256=CXw8B1hqNkadogi58GIXhLg_dTJnvTBaE7Kq1_xQ-10,1423
 together/cli/api/utils.py,sha256=IuqYWPnLI38_Bqd7lj8V_SnGdYc59pRmMbQmciS4FsM,1326
-together/cli/cli.py,sha256=YCDzbXpC5is0rs2PEkUPrIhYuzdyrihQ8GVR_TlDv5s,2054
-together/client.py,sha256=us5aE8hVzKmMCHZz52NcSPXByOsigd2sKbbyqe4x1m0,5861
+together/cli/cli.py,sha256=PVahUjOfAQIjo209FoPKljcCA_OIpOYQ9MAsCjfEMu0,2134
+together/client.py,sha256=xJO2WMli6eGas7OQFkF1hmLSN2Wd2u7iPJXBKueNeIs,6152
 together/constants.py,sha256=UDJhEylJFmdm4bedBDpvqYXBj5Or3k7z9GWtkRY_dZQ,1526
 together/error.py,sha256=HU6247CyzCFjaxL9A0XYbXZ6fY_ebRg0FEYjI4Skogs,5515
-together/filemanager.py,sha256=lwNIYm-BAcnUPtyE0Q_8NpRNsxMlQrpIWFVUVJBBz88,11356
+together/filemanager.py,sha256=RqRs4707oxQMzG3eQ0BreDlJpyOotP7rvHr-Bg-7QFk,11290
 together/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 together/legacy/base.py,sha256=ehrX1SCfRbK5OA83wL1q7-tfF-yuZOUxzjxYfFtdvvQ,727
 together/legacy/complete.py,sha256=NRJX-vjnkg4HrgDo9LS3jFfhwfXpeGxcl24dcrLPK3A,2439
 together/legacy/embeddings.py,sha256=nyTERjyPLTm7Sc987a9FJt1adnW7gIa7xs2CwXLE9EI,635
 together/legacy/files.py,sha256=qmAqMiNTPWb6WvLV5Tsv6kxGRfQ31q7OkHZNFwkw8v0,4082
-together/legacy/finetune.py,sha256=XjZ4Dn2hSjMUVm64s6u1bbh9F7r9GbDKp-WLmzyEKRw,5123
+together/legacy/finetune.py,sha256=nL2Ytt8FOVtGbcMumnn1gyf4aEFrRok8GolWJJaHQAg,5094
 together/legacy/images.py,sha256=bJJRs-6C7-NexPyaeyHiYlHOU51yls5-QAiqtO4xrZU,626
 together/legacy/models.py,sha256=85ZN9Ids_FjdYNDRv5k7sgrtVWPKPHqkDplORtVUGHg,1087
-together/resources/__init__.py,sha256=jZ9O14K7wKb1U0bmIMosa9P3W3xPCJhoEJiaHw8anCc,1078
-together/resources/audio/__init__.py,sha256=e7xp0Lkp_nMAHXcuFHS7dLXP_YqTPMMZIilW1TW_sAI,551
+together/resources/__init__.py,sha256=nWpJWtZQNNoaurYh-FYpChljDLVJu1anfCd4wmskKWM,1189
+together/resources/audio/__init__.py,sha256=S6gV6aEPAHL9kskoA38Uq_Ju7uM1Xcfl0doO-DtQLbo,1185
 together/resources/audio/speech.py,sha256=81ib_gIo-Rxoaipx2Pi9ZsKnOTjeFPwSlBrcUkyX5xk,5211
-together/resources/batch.py,sha256=wSJR30CAFjzZ436vjYdXLCT0ahA5E0ud_qHMS5YvZ1M,3750
+together/resources/audio/transcriptions.py,sha256=67TPiDzfEcsHMpRyZx8eGR5jtnmEcZNUjA6g3Ykq4Zg,10219
+together/resources/audio/translations.py,sha256=_2VeYEthYzPIflDD_hlVmoXk-OCgLgnvva2vMPpaU_Q,10508
+together/resources/batch.py,sha256=tYd8UsfBrVWmdw0nHc2TiYhtXosNkeYBb9Hruze-71A,3749
 together/resources/chat/__init__.py,sha256=RsTptdP8MeGjcdIjze896-J27cRvCbUoMft0X2BVlQ8,617
 together/resources/chat/completions.py,sha256=cBsSFWi9qToQCn4V_3qJ0gwRqORjF6NFDXmHcHfIhOY,14442
 together/resources/code_interpreter.py,sha256=vbN8Mh5MG6HQvqra7p61leIyfebgbgJTM_q2A_Fylhw,2948
 together/resources/completions.py,sha256=5Wa-ZjPCxRcam6CDe7KgGYlTA7yJZMmd5TrRgGCL_ug,11726
 together/resources/embeddings.py,sha256=PTvLb82yjG_-iQOyuhsilp77Fr7gZ0o6WD2KeRnKoxs,2675
 together/resources/endpoints.py,sha256=NNjp-wyzOotzlscGGrANhOHxQBjHTN8f5kTQTH_CLvE,17177
+together/resources/evaluation.py,sha256=YjHCT9JZ30ENuSJ16WZRLPtB1qEIo2aXt8ggK06M1XY,26987
 together/resources/files.py,sha256=y3Ri6UtyAa7fjCJ8_fp26Y2hzzi6Aoo21JKkVgljFl8,5026
-together/resources/finetune.py,sha256=IFevk_AwCr9D2f4LssKEFP9OEDbHSuFA5CZMusDHajc,41343
+together/resources/finetune.py,sha256=jrvd3CYlu5AWeyujuwz5YkAey6mNg_aV6HPXkKP1OtY,40979
 together/resources/images.py,sha256=LQUjKPaFxWTqOAPnyF1Pp7Rz4NLOYhmoKwshpYiprEM,4923
 together/resources/models.py,sha256=qgmAXv61Cq4oLxytenEZBywA8shldDHYxJ_EAu_4JWQ,3864
 together/resources/rerank.py,sha256=3Ju_aRSyZ1s_3zCSNZnSnEJErUVmt2xa3M8z1nvejMA,3931
 together/together_response.py,sha256=a3dgKMPDrlfKQwxYENfNt2T4l2vSZxRWMixhHSy-q3E,1308
-together/types/__init__.py,sha256=_93XstLg1OOWratj_N1bsNN-2aS628uHH3SZj0wszyc,2820
+together/types/__init__.py,sha256=qSGo1AWLB0v7L_y1Fl1FQ_Cen48UmRs0Rc-EEQOjj_A,3942
 together/types/abstract.py,sha256=1lFQI_3WjsR_t1128AeKW0aTk6EiM6Gh1J3ZuyLLPao,642
-together/types/audio_speech.py,sha256=jlj8BZf3dkIDARF1P11fuenVLj4try8Yx4RN-EAkhOU,2609
+together/types/audio_speech.py,sha256=7GNldCfddDNo1vVPqyT-u7fX_TR-du1OePSzoXdAK3s,4694
 together/types/batch.py,sha256=FP0RuQ3EDy-FV1bh-biPICvyRS7WqLm38GHz5lzKyXM,1112
 together/types/chat_completions.py,sha256=NxJ7tFlWynxoLsRtQHzM7Ka3QxKVjRs6EvtOTYZ79bM,5340
 together/types/code_interpreter.py,sha256=cjF8TKgRkJllHS4i24dWQZBGTRsG557eHSewOiip0Kk,1770
@@ -51,19 +55,20 @@ together/types/completions.py,sha256=o3FR5ixsTUj-a3pmOUzbSQg-hESVhpqrC9UD__VCqr4
 together/types/embeddings.py,sha256=J7grkYYn7xhqeKaBO2T-8XQRtHhkzYzymovtGdIUK5A,751
 together/types/endpoints.py,sha256=EzNhHOoQ_D9fUdNQtxQPeSWiFzdFLqpNodN0YLmv_h0,4393
 together/types/error.py,sha256=OVlCs3cx_2WhZK4JzHT8SQyRIIqKOP1AZQ4y1PydjAE,370
-together/types/files.py,sha256=i-Ke57p8Svb1MbMZxu-Fo2zxIc6j-mDO2TLGNwPpGu0,1981
-together/types/finetune.py,sha256=doSbh_zS_W-tcfu4K-yq_wcdHbvkQwHDpYSKfGVphik,11298
+together/types/evaluation.py,sha256=_EG295lqPH2lubO4NNOHTK6VYTIXBF9LM4_nsSOP75g,2078
+together/types/files.py,sha256=xgDoCCQMqvUGbiAkCJqZWkau_eLitaUgqZmfZuTzrxI,2015
+together/types/finetune.py,sha256=pkjRmdJT0MBiQhGCEemZUq9AgZuURBj-Ug4dMXxGFas,11315
 together/types/images.py,sha256=xnC-FZGdZU30WSFTybfGneWxb-kj0ZGufJsgHtB8j0k,980
 together/types/models.py,sha256=99-zr2iJPxc0H_jB0adXSECs_hHqYpnAIAcb9KIs4pU,1085
 together/types/rerank.py,sha256=qZfuXOn7MZ6ly8hpJ_MZ7OU_Bi1-cgYNSB20Wja8Qkk,1061
 together/utils/__init__.py,sha256=5fqvj4KT2rHxKSQot2TSyV_HcvkvkGiqAiaYuJwqtm0,786
 together/utils/_log.py,sha256=5IYNI-jYzxyIS-pUvhb0vE_Muo3MA7GgBhsu66TKP2w,1951
 together/utils/api_helpers.py,sha256=2K0O6qeEQ2zVFvi5NBN5m2kjZJaS3-JfKFecQ7SmGaw,3746
-together/utils/files.py,sha256=btWQawwXbNKfPmCtRyObZViG1Xx-IPz45PrAtMXvcy8,16741
+together/utils/files.py,sha256=CTngpKf_Erp31fbT0dQFtKXrABcsKUpSX1_EGQTgFno,20682
 together/utils/tools.py,sha256=H2MTJhEqtBllaDvOyZehIO_IVNK3P17rSDeILtJIVag,2964
 together/version.py,sha256=p03ivHyE0SyWU4jAnRTBi_sOwywVWoZPU4g2gzRgG-Y,126
-together-1.5.20.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-together-1.5.20.dist-info/METADATA,sha256=fydqr1H3el1KVOXpdKAOtdIPHJuomTuEeHo3M-smhuc,15497
-together-1.5.20.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-together-1.5.20.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
-together-1.5.20.dist-info/RECORD,,
+together-1.5.23.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+together-1.5.23.dist-info/METADATA,sha256=w8bOKW50Jw3KXqgT31YKKRaO8uaeW7hvXAs7WYX8Vtg,16441
+together-1.5.23.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+together-1.5.23.dist-info/entry_points.txt,sha256=G-b5NKW6lUUf1V1fH8IPTBb7jXnK7lhbX9H1zTEJXPs,50
+together-1.5.23.dist-info/RECORD,,