together 1.2.11__py3-none-any.whl → 2.0.0a8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- together/__init__.py +101 -63
- together/_base_client.py +1995 -0
- together/_client.py +1033 -0
- together/_compat.py +219 -0
- together/_constants.py +14 -0
- together/_exceptions.py +108 -0
- together/_files.py +123 -0
- together/_models.py +857 -0
- together/_qs.py +150 -0
- together/_resource.py +43 -0
- together/_response.py +830 -0
- together/_streaming.py +370 -0
- together/_types.py +260 -0
- together/_utils/__init__.py +64 -0
- together/_utils/_compat.py +45 -0
- together/_utils/_datetime_parse.py +136 -0
- together/_utils/_logs.py +25 -0
- together/_utils/_proxy.py +65 -0
- together/_utils/_reflection.py +42 -0
- together/_utils/_resources_proxy.py +24 -0
- together/_utils/_streams.py +12 -0
- together/_utils/_sync.py +58 -0
- together/_utils/_transform.py +457 -0
- together/_utils/_typing.py +156 -0
- together/_utils/_utils.py +421 -0
- together/_version.py +4 -0
- together/lib/.keep +4 -0
- together/lib/__init__.py +23 -0
- together/lib/cli/api/endpoints.py +467 -0
- together/lib/cli/api/evals.py +588 -0
- together/{cli → lib/cli}/api/files.py +20 -17
- together/lib/cli/api/fine_tuning.py +566 -0
- together/lib/cli/api/models.py +140 -0
- together/lib/cli/api/utils.py +50 -0
- together/{cli → lib/cli}/cli.py +17 -23
- together/lib/constants.py +61 -0
- together/lib/resources/__init__.py +11 -0
- together/lib/resources/files.py +999 -0
- together/lib/resources/fine_tuning.py +280 -0
- together/lib/resources/models.py +35 -0
- together/lib/types/__init__.py +13 -0
- together/lib/types/error.py +9 -0
- together/lib/types/fine_tuning.py +455 -0
- together/{utils → lib/utils}/__init__.py +7 -10
- together/{utils → lib/utils}/_log.py +18 -13
- together/lib/utils/files.py +628 -0
- together/lib/utils/serializer.py +10 -0
- together/{utils → lib/utils}/tools.py +17 -2
- together/resources/__init__.py +225 -24
- together/resources/audio/__init__.py +75 -0
- together/resources/audio/audio.py +198 -0
- together/resources/audio/speech.py +605 -0
- together/resources/audio/transcriptions.py +282 -0
- together/resources/audio/translations.py +256 -0
- together/resources/audio/voices.py +135 -0
- together/resources/batches.py +417 -0
- together/resources/chat/__init__.py +30 -21
- together/resources/chat/chat.py +102 -0
- together/resources/chat/completions.py +1063 -257
- together/resources/code_interpreter/__init__.py +33 -0
- together/resources/code_interpreter/code_interpreter.py +258 -0
- together/resources/code_interpreter/sessions.py +135 -0
- together/resources/completions.py +890 -225
- together/resources/embeddings.py +172 -68
- together/resources/endpoints.py +711 -0
- together/resources/evals.py +452 -0
- together/resources/files.py +397 -120
- together/resources/fine_tuning.py +1033 -0
- together/resources/hardware.py +181 -0
- together/resources/images.py +256 -108
- together/resources/jobs.py +214 -0
- together/resources/models.py +251 -44
- together/resources/rerank.py +190 -92
- together/resources/videos.py +374 -0
- together/types/__init__.py +66 -73
- together/types/audio/__init__.py +10 -0
- together/types/audio/speech_create_params.py +75 -0
- together/types/audio/transcription_create_params.py +54 -0
- together/types/audio/transcription_create_response.py +111 -0
- together/types/audio/translation_create_params.py +40 -0
- together/types/audio/translation_create_response.py +70 -0
- together/types/audio/voice_list_response.py +23 -0
- together/types/audio_speech_stream_chunk.py +16 -0
- together/types/autoscaling.py +13 -0
- together/types/autoscaling_param.py +15 -0
- together/types/batch_create_params.py +24 -0
- together/types/batch_create_response.py +14 -0
- together/types/batch_job.py +45 -0
- together/types/batch_list_response.py +10 -0
- together/types/chat/__init__.py +18 -0
- together/types/chat/chat_completion.py +60 -0
- together/types/chat/chat_completion_chunk.py +61 -0
- together/types/chat/chat_completion_structured_message_image_url_param.py +18 -0
- together/types/chat/chat_completion_structured_message_text_param.py +13 -0
- together/types/chat/chat_completion_structured_message_video_url_param.py +18 -0
- together/types/chat/chat_completion_usage.py +13 -0
- together/types/chat/chat_completion_warning.py +9 -0
- together/types/chat/completion_create_params.py +329 -0
- together/types/code_interpreter/__init__.py +5 -0
- together/types/code_interpreter/session_list_response.py +31 -0
- together/types/code_interpreter_execute_params.py +45 -0
- together/types/completion.py +42 -0
- together/types/completion_chunk.py +66 -0
- together/types/completion_create_params.py +138 -0
- together/types/dedicated_endpoint.py +44 -0
- together/types/embedding.py +24 -0
- together/types/embedding_create_params.py +31 -0
- together/types/endpoint_create_params.py +43 -0
- together/types/endpoint_list_avzones_response.py +11 -0
- together/types/endpoint_list_params.py +18 -0
- together/types/endpoint_list_response.py +41 -0
- together/types/endpoint_update_params.py +27 -0
- together/types/eval_create_params.py +263 -0
- together/types/eval_create_response.py +16 -0
- together/types/eval_list_params.py +21 -0
- together/types/eval_list_response.py +10 -0
- together/types/eval_status_response.py +100 -0
- together/types/evaluation_job.py +139 -0
- together/types/execute_response.py +108 -0
- together/types/file_delete_response.py +13 -0
- together/types/file_list.py +12 -0
- together/types/file_purpose.py +9 -0
- together/types/file_response.py +31 -0
- together/types/file_type.py +7 -0
- together/types/fine_tuning_cancel_response.py +194 -0
- together/types/fine_tuning_content_params.py +24 -0
- together/types/fine_tuning_delete_params.py +11 -0
- together/types/fine_tuning_delete_response.py +12 -0
- together/types/fine_tuning_list_checkpoints_response.py +21 -0
- together/types/fine_tuning_list_events_response.py +12 -0
- together/types/fine_tuning_list_response.py +199 -0
- together/types/finetune_event.py +41 -0
- together/types/finetune_event_type.py +33 -0
- together/types/finetune_response.py +177 -0
- together/types/hardware_list_params.py +16 -0
- together/types/hardware_list_response.py +58 -0
- together/types/image_data_b64.py +15 -0
- together/types/image_data_url.py +15 -0
- together/types/image_file.py +23 -0
- together/types/image_generate_params.py +85 -0
- together/types/job_list_response.py +47 -0
- together/types/job_retrieve_response.py +43 -0
- together/types/log_probs.py +18 -0
- together/types/model_list_response.py +10 -0
- together/types/model_object.py +42 -0
- together/types/model_upload_params.py +36 -0
- together/types/model_upload_response.py +23 -0
- together/types/rerank_create_params.py +36 -0
- together/types/rerank_create_response.py +36 -0
- together/types/tool_choice.py +23 -0
- together/types/tool_choice_param.py +23 -0
- together/types/tools_param.py +23 -0
- together/types/training_method_dpo.py +22 -0
- together/types/training_method_sft.py +18 -0
- together/types/video_create_params.py +86 -0
- together/types/video_job.py +57 -0
- together-2.0.0a8.dist-info/METADATA +680 -0
- together-2.0.0a8.dist-info/RECORD +164 -0
- {together-1.2.11.dist-info → together-2.0.0a8.dist-info}/WHEEL +1 -1
- together-2.0.0a8.dist-info/entry_points.txt +2 -0
- {together-1.2.11.dist-info → together-2.0.0a8.dist-info/licenses}/LICENSE +1 -1
- together/abstract/api_requestor.py +0 -723
- together/cli/api/chat.py +0 -276
- together/cli/api/completions.py +0 -119
- together/cli/api/finetune.py +0 -272
- together/cli/api/images.py +0 -82
- together/cli/api/models.py +0 -42
- together/client.py +0 -157
- together/constants.py +0 -31
- together/error.py +0 -191
- together/filemanager.py +0 -388
- together/legacy/__init__.py +0 -0
- together/legacy/base.py +0 -27
- together/legacy/complete.py +0 -93
- together/legacy/embeddings.py +0 -27
- together/legacy/files.py +0 -146
- together/legacy/finetune.py +0 -177
- together/legacy/images.py +0 -27
- together/legacy/models.py +0 -44
- together/resources/finetune.py +0 -489
- together/together_response.py +0 -50
- together/types/abstract.py +0 -26
- together/types/chat_completions.py +0 -171
- together/types/common.py +0 -65
- together/types/completions.py +0 -104
- together/types/embeddings.py +0 -35
- together/types/error.py +0 -16
- together/types/files.py +0 -89
- together/types/finetune.py +0 -265
- together/types/images.py +0 -42
- together/types/models.py +0 -44
- together/types/rerank.py +0 -43
- together/utils/api_helpers.py +0 -84
- together/utils/files.py +0 -204
- together/version.py +0 -6
- together-1.2.11.dist-info/METADATA +0 -408
- together-1.2.11.dist-info/RECORD +0 -58
- together-1.2.11.dist-info/entry_points.txt +0 -3
- /together/{abstract → lib/cli}/__init__.py +0 -0
- /together/{cli → lib/cli/api}/__init__.py +0 -0
- /together/{cli/api/__init__.py → py.typed} +0 -0
|
@@ -0,0 +1,628 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import csv
|
|
5
|
+
import json
|
|
6
|
+
from typing import Any, Dict, List, cast
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from traceback import format_exc
|
|
9
|
+
|
|
10
|
+
from tqdm import tqdm
|
|
11
|
+
|
|
12
|
+
from together.types import FilePurpose
|
|
13
|
+
from together.lib.constants import (
|
|
14
|
+
MIN_SAMPLES,
|
|
15
|
+
DISABLE_TQDM,
|
|
16
|
+
NUM_BYTES_IN_GB,
|
|
17
|
+
MAX_FILE_SIZE_GB,
|
|
18
|
+
PARQUET_EXPECTED_COLUMNS,
|
|
19
|
+
REQUIRED_COLUMNS_MESSAGE,
|
|
20
|
+
JSONL_REQUIRED_COLUMNS_MAP,
|
|
21
|
+
POSSIBLE_ROLES_CONVERSATION,
|
|
22
|
+
DatasetFormat,
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class InvalidFileFormatError(ValueError):
    """Raised when a file fails one of the local format checks.

    Attributes:
        message: Human-readable description of the failure.
        line_number: 1-based line where the problem was found, if known.
        error_source: Name of the report field the failure maps to, if any.
    """

    def __init__(self, message: str = "", line_number: int | None = None, error_source: str | None = None) -> None:
        super().__init__(message)
        # Keep the structured details so callers can translate the error
        # into per-field entries of a check report.
        self.error_source = error_source
        self.line_number = line_number
        self.message = message
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def check_file(
    file: Path | str,
    purpose: FilePurpose | str = "fine-tune",
) -> Dict[str, Any]:
    """Run local validation checks on a dataset file and build a report.

    Args:
        file: Path to the file to validate.
        purpose: Intended upload purpose; affects which formats are allowed.

    Returns:
        Dict[str, Any]: Report with per-check flags and a summary message.
    """
    path = file if isinstance(file, Path) else Path(file)

    report_dict: Dict[str, Any] = {
        "is_check_passed": True,
        "message": "Checks passed",
        "found": None,
        "file_size": None,
        "utf8": None,
        "line_type": None,
        "text_field": None,
        "key_value": None,
        "has_min_samples": None,
        "num_samples": None,
        "load_json": None,
        "load_csv": None,
    }

    # Nothing else can be checked if the file is missing.
    if not path.is_file():
        report_dict["found"] = False
        report_dict["is_check_passed"] = False
        return report_dict
    report_dict["found"] = True

    file_size = os.stat(path.as_posix()).st_size

    if file_size == 0:
        report_dict["message"] = "File is empty"
        report_dict["file_size"] = 0
        report_dict["is_check_passed"] = False
        return report_dict
    if file_size > MAX_FILE_SIZE_GB * NUM_BYTES_IN_GB:
        # Oversized files fail this check but are still content-checked below;
        # "file_size" is intentionally left unset in this case.
        report_dict["message"] = (
            f"Maximum supported file size is {MAX_FILE_SIZE_GB} GB. Found file with size of {round(file_size / NUM_BYTES_IN_GB, 3)} GB."
        )
        report_dict["is_check_passed"] = False
    else:
        report_dict["file_size"] = file_size

    # Dispatch to the per-format checker based on the file extension.
    checkers = {
        ".jsonl": ("jsonl", _check_jsonl),
        ".parquet": ("parquet", _check_parquet),
        ".csv": ("csv", _check_csv),
    }
    entry = checkers.get(path.suffix)
    if entry is None:
        report_dict["filetype"] = (
            f"Unknown extension of file {path}. Only files with extensions .jsonl, .parquet, and .csv are supported."
        )
        report_dict["is_check_passed"] = False
    else:
        label, checker = entry
        report_dict["filetype"] = label
        report_dict.update(checker(path, purpose))

    return report_dict
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def _check_conversation_type(messages: List[Dict[str, str | bool]], idx: int) -> None:
    """Check that a conversation is a non-empty list of well-formed message dicts.

    Each message must be a dict containing every column from
    ``REQUIRED_COLUMNS_MESSAGE`` with a string value. The top-level ``messages``
    value itself is not type-checked here; callers are expected to pass a list.

    Args:
        messages: The messages in the conversation.
        idx: Zero-based line number in the file (reported as ``idx + 1``).

    Raises:
        InvalidFileFormatError: If the conversation is empty or any message is
            malformed.
    """
    if not messages:
        raise InvalidFileFormatError(
            message=f"Invalid format on line {idx + 1} of the input file. The `messages` column must not be empty.",
            line_number=idx + 1,
            error_source="key_value",
        )

    for message in messages:
        if not isinstance(cast(Any, message), dict):
            raise InvalidFileFormatError(
                message=f"Invalid format on line {idx + 1} of the input file. "
                f"The `messages` column must be a list of dicts. Found {type(message)}",
                line_number=idx + 1,
                error_source="key_value",
            )

        for column in REQUIRED_COLUMNS_MESSAGE:
            if column not in message:
                raise InvalidFileFormatError(
                    message=f"Missing required column `{column}` in message on line {idx + 1}.",
                    line_number=idx + 1,
                    error_source="key_value",
                )
            if not isinstance(message[column], str):
                raise InvalidFileFormatError(
                    message=f"Column `{column}` is not a string on line {idx + 1}. Found {type(message[column])}",
                    line_number=idx + 1,
                    error_source="text_field",
                )
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def _check_conversation_roles(require_assistant_role: bool, assistant_role_exists: bool, idx: int) -> None:
|
|
156
|
+
"""Check that the conversation has correct roles.
|
|
157
|
+
|
|
158
|
+
Args:
|
|
159
|
+
require_assistant_role: Whether to require at least one assistant role.
|
|
160
|
+
assistant_role_exists: Whether an assistant role exists in the conversation.
|
|
161
|
+
idx: Line number in the file.
|
|
162
|
+
|
|
163
|
+
Raises:
|
|
164
|
+
InvalidFileFormatError: If the conversation roles are invalid.
|
|
165
|
+
"""
|
|
166
|
+
if require_assistant_role and not assistant_role_exists:
|
|
167
|
+
raise InvalidFileFormatError(
|
|
168
|
+
message=f"Invalid format on line {idx + 1} of the input file. "
|
|
169
|
+
"At least one message with the assistant role must be present in the example.",
|
|
170
|
+
line_number=idx + 1,
|
|
171
|
+
error_source="key_value",
|
|
172
|
+
)
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def _check_message_weight(message: Dict[str, str | bool], idx: int) -> None:
|
|
176
|
+
"""Check that the message has a weight with the correct type and value.
|
|
177
|
+
|
|
178
|
+
Args:
|
|
179
|
+
message: The message to check.
|
|
180
|
+
idx: Line number in the file.
|
|
181
|
+
|
|
182
|
+
Raises:
|
|
183
|
+
InvalidFileFormatError: If the message weight is invalid.
|
|
184
|
+
"""
|
|
185
|
+
if "weight" in message:
|
|
186
|
+
weight = message["weight"]
|
|
187
|
+
if not isinstance(weight, int):
|
|
188
|
+
raise InvalidFileFormatError(
|
|
189
|
+
message=f"Weight must be an integer on line {idx + 1}.",
|
|
190
|
+
line_number=idx + 1,
|
|
191
|
+
error_source="key_value",
|
|
192
|
+
)
|
|
193
|
+
if weight not in {0, 1}:
|
|
194
|
+
raise InvalidFileFormatError(
|
|
195
|
+
message=f"Weight must be either 0 or 1 on line {idx + 1}.",
|
|
196
|
+
line_number=idx + 1,
|
|
197
|
+
error_source="key_value",
|
|
198
|
+
)
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
def _check_message_role(message: Dict[str, str | bool], previous_role: str | bool | None, idx: int) -> str | bool:
    """Validate a message's role and that roles alternate between turns.

    Args:
        message: The message to check.
        previous_role: Role of the preceding message, or ``None`` for the
            first message.
        idx: Zero-based line number in the file.

    Returns:
        The role of the current message.

    Raises:
        InvalidFileFormatError: If the role is unknown or repeats the
            previous message's role.
    """
    role = message["role"]

    if role not in POSSIBLE_ROLES_CONVERSATION:
        raise InvalidFileFormatError(
            message=f"Invalid role `{role}` in conversation on line {idx + 1}. "
            f"Possible roles: {', '.join(POSSIBLE_ROLES_CONVERSATION)}",
            line_number=idx + 1,
            error_source="key_value",
        )

    # Two consecutive messages with the same role break the expected
    # user/assistant alternation.
    if previous_role is not None and role == previous_role:
        raise InvalidFileFormatError(
            message=f"Invalid role turns on line {idx + 1} of the input file. "
            "After the optional system message, conversation roles must alternate between user/assistant/user/assistant.",
            line_number=idx + 1,
            error_source="key_value",
        )

    return role
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def validate_messages(messages: List[Dict[str, str | bool]], idx: int, require_assistant_role: bool = True) -> None:
    """Validate one conversation's message list.

    Args:
        messages: List of message dictionaries to validate.
        idx: Zero-based line number in the file.
        require_assistant_role: Whether at least one assistant message is
            required in the example.

    Raises:
        InvalidFileFormatError: If the messages are invalid.
    """
    _check_conversation_type(messages, idx)

    # Per-message weights are only validated when at least one message
    # carries a weight.
    weights_present = any("weight" in msg for msg in messages)

    last_role: str | bool | None = None
    saw_assistant = False
    for msg in messages:
        if weights_present:
            _check_message_weight(msg, idx)
        last_role = _check_message_role(msg, last_role, idx)
        if last_role == "assistant":
            saw_assistant = True

    _check_conversation_roles(require_assistant_role, saw_assistant, idx)
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
def validate_preference_openai(example: Dict[str, Any], idx: int = 0) -> None:
    """Validate one entry of an OpenAI-style preference dataset.

    The entry must contain an ``input`` dict with a ``messages`` conversation
    not ending in an assistant turn, plus ``preferred_output`` and
    ``non_preferred_output`` lists, each holding exactly one assistant message
    with string content.

    Args:
        example (dict): Input entry to be checked.
        idx (int): Zero-based line number in the file (reported as ``idx + 1``).

    Raises:
        InvalidFileFormatError: If the dataset format is invalid.
    """
    if not isinstance(example["input"], dict):
        raise InvalidFileFormatError(
            message="The dataset is malformed, the `input` field must be a dictionary.",
            line_number=idx + 1,
            error_source="key_value",
        )

    if "messages" not in example["input"]:
        raise InvalidFileFormatError(
            message="The dataset is malformed, the `input` dictionary must contain a `messages` field.",
            line_number=idx + 1,
            error_source="key_value",
        )

    # The prompt conversation itself need not contain an assistant turn;
    # the assistant responses live in the *_output fields below.
    messages: List[Dict[str, str | bool]] = cast(Any, example["input"]["messages"])
    validate_messages(messages, idx, require_assistant_role=False)

    if example["input"]["messages"][-1]["role"] == "assistant":
        raise InvalidFileFormatError(
            message=f"The last message in the input conversation must not be from the assistant on line {idx + 1}.",
            line_number=idx + 1,
            error_source="key_value",
        )

    keys = ["preferred_output", "non_preferred_output"]

    # Both output fields must be a one-element list containing a single
    # assistant message with string content.
    for key in keys:
        if key not in example:
            raise InvalidFileFormatError(
                message=f"The dataset is malformed, the `{key}` field must be present in the input dictionary on line {idx + 1}.",
                line_number=idx + 1,
                error_source="key_value",
            )

        if not isinstance(example[key], list):
            raise InvalidFileFormatError(
                message=f"The dataset is malformed, the `{key}` field must be a list on line {idx + 1}.",
                line_number=idx + 1,
                error_source="key_value",
            )

        if len(example[key]) != 1:
            raise InvalidFileFormatError(
                message=f"The dataset is malformed, the `{key}` list must contain exactly one message on line {idx + 1}.",
                line_number=idx + 1,
                error_source="key_value",
            )

        if not isinstance(example[key][0], dict):
            raise InvalidFileFormatError(
                message=f"The dataset is malformed, the first element of `{key}` must be a dictionary on line {idx + 1}.",
                line_number=idx + 1,
                error_source="key_value",
            )

        if "role" not in example[key][0]:
            raise InvalidFileFormatError(
                message=f"The dataset is malformed, the first element of `{key}` must have a 'role' field on line {idx + 1}.",
                line_number=idx + 1,
                error_source="key_value",
            )

        if example[key][0]["role"] != "assistant":
            raise InvalidFileFormatError(
                message=f"The dataset is malformed, the first element of `{key}` must have the 'assistant' role on line {idx + 1}.",
                line_number=idx + 1,
                error_source="key_value",
            )

        if "content" not in example[key][0]:
            raise InvalidFileFormatError(
                message=f"The dataset is malformed, the first element of `{key}` must have a 'content' field on line {idx + 1}.",
                line_number=idx + 1,
                error_source="key_value",
            )

        if not isinstance(example[key][0]["content"], str):
            raise InvalidFileFormatError(
                message=f"The dataset is malformed, the 'content' field in `{key}` must be a string on line {idx + 1}.",
                line_number=idx + 1,
                error_source="key_value",
            )
|
|
350
|
+
|
|
351
|
+
|
|
352
|
+
def _check_utf8(file: Path) -> Dict[str, Any]:
|
|
353
|
+
"""Check if the file is UTF-8 encoded.
|
|
354
|
+
|
|
355
|
+
Args:
|
|
356
|
+
file (Path): Path to the file to check.
|
|
357
|
+
Returns:
|
|
358
|
+
Dict[str, Any]: A dictionary with the results of the check.
|
|
359
|
+
"""
|
|
360
|
+
report_dict: Dict[str, Any] = {}
|
|
361
|
+
try:
|
|
362
|
+
# Dry-run UTF-8 decode by iterating through the file to avoid loading it entirely into memory
|
|
363
|
+
with file.open(encoding="utf-8") as f:
|
|
364
|
+
for _ in f:
|
|
365
|
+
pass
|
|
366
|
+
report_dict["utf8"] = True
|
|
367
|
+
except UnicodeDecodeError as e:
|
|
368
|
+
report_dict["utf8"] = False
|
|
369
|
+
report_dict["message"] = f"File is not UTF-8 encoded. Error raised: {e}."
|
|
370
|
+
report_dict["is_check_passed"] = False
|
|
371
|
+
return report_dict
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
def _check_samples_count(file: Path, report_dict: Dict[str, Any], idx: int) -> Dict[str, Any]:
    """Record whether the file contains at least ``MIN_SAMPLES`` samples.

    Args:
        file: Path of the file being validated (used in the failure message).
        report_dict: Report to update in place.
        idx: Zero-based index of the last sample that was read.

    Returns:
        Dict[str, Any]: The same ``report_dict``, updated in place.
    """
    num_samples = idx + 1
    if num_samples >= MIN_SAMPLES:
        report_dict["num_samples"] = num_samples
        report_dict["has_min_samples"] = True
    else:
        report_dict["has_min_samples"] = False
        report_dict["message"] = (
            f"Processing {file} resulted in only {num_samples} samples. Our minimum is {MIN_SAMPLES} samples. "
        )
        report_dict["is_check_passed"] = False

    return report_dict
|
|
386
|
+
|
|
387
|
+
|
|
388
|
+
def _check_csv(file: Path, purpose: FilePurpose | str) -> Dict[str, Any]:
|
|
389
|
+
"""Check if the file is a valid CSV file.
|
|
390
|
+
|
|
391
|
+
Args:
|
|
392
|
+
file (Path): Path to the file to check.
|
|
393
|
+
purpose (FilePurpose | str): Purpose of the file, used to determine if the file should be checked for specific columns.
|
|
394
|
+
|
|
395
|
+
Returns:
|
|
396
|
+
Dict[str, Any]: A dictionary with the results of the check.
|
|
397
|
+
"""
|
|
398
|
+
report_dict: Dict[str, Any] = {}
|
|
399
|
+
if purpose != "eval":
|
|
400
|
+
report_dict["is_check_passed"] = False
|
|
401
|
+
report_dict["message"] = (
|
|
402
|
+
f"CSV files are not supported for {purpose}. Only JSONL and Parquet files are supported."
|
|
403
|
+
)
|
|
404
|
+
return report_dict
|
|
405
|
+
|
|
406
|
+
report_dict.update(_check_utf8(file))
|
|
407
|
+
|
|
408
|
+
if not report_dict["utf8"]:
|
|
409
|
+
return report_dict
|
|
410
|
+
|
|
411
|
+
with file.open() as f:
|
|
412
|
+
reader = csv.DictReader(f)
|
|
413
|
+
if not reader.fieldnames:
|
|
414
|
+
report_dict["message"] = "CSV file is empty or has no header."
|
|
415
|
+
report_dict["is_check_passed"] = False
|
|
416
|
+
return report_dict
|
|
417
|
+
idx = -1
|
|
418
|
+
|
|
419
|
+
try:
|
|
420
|
+
# for loop to iterate through the CSV rows
|
|
421
|
+
for idx, item in enumerate(reader):
|
|
422
|
+
if None in item.keys() or None in item.values():
|
|
423
|
+
raise InvalidFileFormatError(
|
|
424
|
+
message=f"CSV file is malformed or the number of columns found on line {idx + 1} is inconsistent with the header",
|
|
425
|
+
line_number=idx + 1,
|
|
426
|
+
error_source="format",
|
|
427
|
+
)
|
|
428
|
+
|
|
429
|
+
report_dict.update(_check_samples_count(file, report_dict, idx))
|
|
430
|
+
report_dict["load_csv"] = True
|
|
431
|
+
|
|
432
|
+
except InvalidFileFormatError as e:
|
|
433
|
+
report_dict["load_csv"] = False
|
|
434
|
+
report_dict["is_check_passed"] = False
|
|
435
|
+
report_dict["message"] = e.message
|
|
436
|
+
if e.line_number is not None:
|
|
437
|
+
report_dict["line_number"] = e.line_number
|
|
438
|
+
if e.error_source is not None:
|
|
439
|
+
report_dict[e.error_source] = False
|
|
440
|
+
except ValueError:
|
|
441
|
+
report_dict["load_csv"] = False
|
|
442
|
+
if idx < 0:
|
|
443
|
+
report_dict["message"] = "Unable to decode file. File may be empty or in an unsupported format. "
|
|
444
|
+
else:
|
|
445
|
+
report_dict["message"] = f"Error parsing the CSV file. Unexpected format on line {idx + 1}."
|
|
446
|
+
report_dict["is_check_passed"] = False
|
|
447
|
+
|
|
448
|
+
return report_dict
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
def _check_jsonl(file: Path, purpose: FilePurpose | str) -> Dict[str, Any]:
    """Validate a JSONL dataset file and build a partial check report.

    Every line must parse as a JSON object. Unless ``purpose`` is "eval",
    each line must also match exactly one dataset format from
    ``JSONL_REQUIRED_COLUMNS_MAP``, carry no extra columns, and all lines
    must share the same format.

    Args:
        file (Path): Path to the file to check.
        purpose (FilePurpose | str): Purpose of the file; "eval" skips the
            per-line format checks.

    Returns:
        Dict[str, Any]: A dictionary with the results of the check.
    """
    report_dict: Dict[str, Any] = {}
    report_dict.update(_check_utf8(file))
    if not report_dict["utf8"]:
        return report_dict

    # Format detected on the first line; every later line must match it.
    dataset_format = None
    with file.open() as f:
        # idx stays -1 when the file yields no lines at all.
        idx = -1
        try:
            for idx, line in tqdm(
                enumerate(f),
                desc="Validating file",
                unit=" lines",
                disable=bool(DISABLE_TQDM),
            ):
                json_line = json.loads(line)

                if not isinstance(json_line, dict):
                    raise InvalidFileFormatError(
                        message=(
                            f"Error parsing file. Invalid format on line {idx + 1} of the input file. "
                            "Datasets must follow text, conversational, or instruction format. For more"
                            "information, see https://docs.together.ai/docs/fine-tuning-data-preparation"
                        ),
                        line_number=idx + 1,
                        error_source="line_type",
                    )
                # In evals, we don't check the format of the dataset.
                if purpose != "eval":
                    # Detect which known format this line's columns satisfy.
                    current_format = None
                    for possible_format in JSONL_REQUIRED_COLUMNS_MAP:
                        if all(column in json_line for column in JSONL_REQUIRED_COLUMNS_MAP[possible_format]):
                            if current_format is None:
                                current_format = possible_format
                            elif current_format != possible_format:  # type: ignore[unreachable]
                                raise InvalidFileFormatError(
                                    message="Found multiple dataset formats in the input file. "
                                    f"Got {current_format} and {possible_format} on line {idx + 1}.",
                                    line_number=idx + 1,
                                    error_source="format",
                                )

                            # Check that there are no extra columns
                            for column in cast(List[str], json_line.keys()):
                                if column not in JSONL_REQUIRED_COLUMNS_MAP[possible_format]:
                                    raise InvalidFileFormatError(
                                        message=f'Found extra column "{column}" in the line {idx + 1}.',
                                        line_number=idx + 1,
                                        error_source="format",
                                    )

                    if current_format is None:
                        raise InvalidFileFormatError(
                            message=(
                                f"Error parsing file. Could not detect a format for the line {idx + 1} with the columns:\n"
                                f"{json_line.keys()}"
                            ),
                            line_number=idx + 1,
                            error_source="format",
                        )
                    # Per-format content validation.
                    if current_format == DatasetFormat.PREFERENCE_OPENAI:
                        validate_preference_openai(cast(Dict[str, Any], json_line), idx)
                    elif current_format == DatasetFormat.CONVERSATION:
                        message_column = JSONL_REQUIRED_COLUMNS_MAP[DatasetFormat.CONVERSATION][0]
                        require_assistant = purpose != "eval"
                        message: List[Dict[str, str | bool]] = cast(Any, json_line[message_column])
                        validate_messages(
                            message,
                            idx,
                            require_assistant_role=require_assistant,
                        )
                    else:
                        # Remaining formats only require their columns to be strings.
                        for column in JSONL_REQUIRED_COLUMNS_MAP[current_format]:
                            if not isinstance(json_line[column], str):
                                raise InvalidFileFormatError(
                                    message=f'Invalid value type for "{column}" key on line {idx + 1}. '
                                    f"Expected string. Found {type(cast(Any, json_line[column]))}.",
                                    line_number=idx + 1,
                                    error_source="key_value",
                                )

                    if dataset_format is None:
                        dataset_format = current_format
                    elif current_format != dataset_format:  # type: ignore[unreachable]
                        raise InvalidFileFormatError(
                            message="All samples in the dataset must have the same dataset format. "
                            f"Got {dataset_format} for the first line and {current_format} "
                            f"for the line {idx + 1}.",
                            line_number=idx + 1,
                            error_source="format",
                        )
            report_dict.update(_check_samples_count(file, report_dict, idx))

            report_dict["load_json"] = True

        except InvalidFileFormatError as e:
            report_dict["load_json"] = False
            report_dict["is_check_passed"] = False
            report_dict["message"] = e.message
            if e.line_number is not None:
                report_dict["line_number"] = e.line_number
            if e.error_source is not None:
                report_dict[e.error_source] = False
        except ValueError:
            # json.JSONDecodeError is a ValueError subclass, so malformed JSON
            # lands here.
            report_dict["load_json"] = False
            if idx < 0:
                report_dict["message"] = "Unable to decode file. File may be empty or in an unsupported format. "
            else:
                report_dict["message"] = f"Error parsing json payload. Unexpected format on line {idx + 1}."
            report_dict["is_check_passed"] = False

    # Flags not explicitly failed above are reported as passing.
    if "text_field" not in report_dict:
        report_dict["text_field"] = True
    if "line_type" not in report_dict:
        report_dict["line_type"] = True
    if "key_value" not in report_dict:
        report_dict["key_value"] = True
    return report_dict
|
|
570
|
+
|
|
571
|
+
|
|
572
|
+
def _check_parquet(file: Path, purpose: FilePurpose | str) -> Dict[str, Any]:
    """Validate a Parquet dataset file and assemble a check report.

    Args:
        file: Path to the Parquet file on disk.
        purpose: Upload purpose; Parquet is rejected outright for "eval".

    Returns:
        Dict[str, Any]: Report containing "is_check_passed" plus either
        failure details ("message" / "load_parquet" / "has_min_samples")
        or "num_samples" on success.

    Raises:
        ImportError: If the optional pyarrow dependency is not installed.
    """
    try:
        # Pyarrow is optional as it's large (~80MB) and isn't compatible with older systems.
        from pyarrow import ArrowInvalid, parquet
    except ImportError as e:
        raise ImportError(
            "pyarrow is not installed and is required to use parquet files. Please install it via `pip install together[pyarrow]`"
        ) from e

    report: Dict[str, Any] = {}

    # Evaluation uploads never accept Parquet; bail out before touching the file.
    if purpose == "eval":
        report["is_check_passed"] = False
        report["message"] = (
            f"Parquet files are not supported for {purpose}. Only JSONL and CSV files are supported."
        )
        return report

    try:
        pa_table = parquet.read_table(str(file), memory_map=True)  # type: ignore[reportUnknownMemberType]
    except ArrowInvalid:
        report["load_parquet"] = (
            f"An exception has occurred when loading the Parquet file {file}. Please check the file for corruption. "
            f"Exception trace:\n{format_exc()}"
        )
        report["is_check_passed"] = False
        return report

    schema_columns = pa_table.schema.names

    # Pre-tokenized datasets must carry an `input_ids` column.
    if "input_ids" not in schema_columns:
        report["load_parquet"] = f"Parquet file {file} does not contain the `input_ids` column."
        report["is_check_passed"] = False
        return report

    # Don't check for eval
    unexpected = next(
        (name for name in schema_columns if name not in PARQUET_EXPECTED_COLUMNS), None
    )
    if unexpected is not None:
        report["load_parquet"] = (
            f"Parquet file {file} contains an unexpected column {unexpected}. "
            f"Only columns {PARQUET_EXPECTED_COLUMNS} are supported."
        )
        report["is_check_passed"] = False
        return report

    sample_count = len(pa_table)
    if sample_count < MIN_SAMPLES:
        report["has_min_samples"] = False
        report["message"] = (
            f"Processing {file} resulted in only {sample_count} samples. Our minimum is {MIN_SAMPLES} samples. "
        )
        report["is_check_passed"] = False
        return report

    report["num_samples"] = sample_count
    report["is_check_passed"] = True
    return report
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def datetime_serializer(obj: Any) -> str:
    """``json.dumps`` ``default`` hook: render ``datetime`` values as ISO-8601.

    Args:
        obj: The object json could not serialize natively.

    Returns:
        str: ``obj.isoformat()`` when ``obj`` is a ``datetime``.

    Raises:
        TypeError: For any non-``datetime`` object.
    """
    if not isinstance(obj, datetime):
        raise TypeError(f"Object of type {obj.__class__.__name__} is not JSON serializable")
    return obj.isoformat()
|
|
@@ -1,10 +1,9 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
-
import logging
|
|
4
3
|
import os
|
|
4
|
+
import logging
|
|
5
5
|
from datetime import datetime
|
|
6
6
|
|
|
7
|
-
|
|
8
7
|
logger = logging.getLogger("together")
|
|
9
8
|
|
|
10
9
|
TOGETHER_LOG = os.environ.get("TOGETHER_LOG")
|
|
@@ -73,3 +72,19 @@ def convert_unix_timestamp(timestamp: int) -> str:
|
|
|
73
72
|
iso_format = dt_object.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
|
|
74
73
|
|
|
75
74
|
return iso_format
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def format_timestamp(timestamp_str: str) -> str:
    """Format a timestamp string as a readable date.

    Args:
        timestamp_str: A timestamp string understood by ``parse_timestamp``.

    Returns:
        str: The timestamp rendered as ``MM/DD/YYYY, HH:MM AM/PM``, or an
        empty string when the input cannot be parsed.
    """
    try:
        return parse_timestamp(timestamp_str).strftime("%m/%d/%Y, %I:%M %p")
    except ValueError:
        return ""
|