mldataforge 0.1.2__tar.gz → 0.1.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mldataforge-0.1.2 → mldataforge-0.1.4}/PKG-INFO +5 -2
- {mldataforge-0.1.2 → mldataforge-0.1.4}/README.md +4 -1
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/commands/__init__.py +2 -0
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/commands/convert/jsonl.py +3 -1
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/commands/convert/parquet.py +3 -1
- mldataforge-0.1.4/mldataforge/commands/split.py +78 -0
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/options.py +41 -0
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/pigz.py +7 -1
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/utils.py +85 -31
- {mldataforge-0.1.2 → mldataforge-0.1.4}/pyproject.toml +1 -1
- {mldataforge-0.1.2 → mldataforge-0.1.4}/.gitignore +0 -0
- {mldataforge-0.1.2 → mldataforge-0.1.4}/LICENSE +0 -0
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/__main__.py +0 -0
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/commands/convert/__init__.py +0 -0
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/commands/convert/mds.py +0 -0
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/commands/join.py +0 -0
- {mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/mds.py +0 -0
{mldataforge-0.1.2 → mldataforge-0.1.4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mldataforge
-Version: 0.1.2
+Version: 0.1.4
 Summary: swiss army knife of scripts for transforming and processing datasets for machine learning.
 Project-URL: Homepage, https://github.com/schneiderkamplab/mldataforge
 Project-URL: Bug Tracker, https://github.com/schneiderkamplab/mldataforge/issues
@@ -30,9 +30,12 @@ Description-Content-Type: text/markdown
 # mldatasets
 swiss army knife of scripts for transforming and processing datasets for machine learning
 
-##
+## conversion
 Currently, mldataforge provides space- and time-efficient conversions between JSONL (with or without compression), MosaiclML Dataset (MDS format), and Parquet. The implementations handle conversions by individual samples or small batches of samples and make efficient use of multi-core architectures where possible. Consequently, mldataforge is an excellent choice when transforming TB-scale datasets on data processing nodes with many cores.
 
+## splitting
+Currently, mldataforge provides space- and time-efficient splitting of JSONL (with or without compression). The implementations handle conversions by individual samples or small batches of samples and make efficient use of multi-core architectures where possible. The splitting function can take an already splitted dataset and re-split it with a different granularity.
+
 ## installation and general usage
 ```
 pip install mldataforge
{mldataforge-0.1.2 → mldataforge-0.1.4}/README.md
@@ -1,9 +1,12 @@
 # mldatasets
 swiss army knife of scripts for transforming and processing datasets for machine learning
 
-##
+## conversion
 Currently, mldataforge provides space- and time-efficient conversions between JSONL (with or without compression), MosaiclML Dataset (MDS format), and Parquet. The implementations handle conversions by individual samples or small batches of samples and make efficient use of multi-core architectures where possible. Consequently, mldataforge is an excellent choice when transforming TB-scale datasets on data processing nodes with many cores.
 
+## splitting
+Currently, mldataforge provides space- and time-efficient splitting of JSONL (with or without compression). The implementations handle conversions by individual samples or small batches of samples and make efficient use of multi-core architectures where possible. The splitting function can take an already splitted dataset and re-split it with a different granularity.
+
 ## installation and general usage
 ```
 pip install mldataforge
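The conversion path described in the README is what the `convert` commands below drive: a Hugging Face dataset is streamed sample by sample into the target writer. As a rough illustration, the JSONL-to-MDS direction can be driven programmatically with the same helpers that appear in this diff (`load_dataset`, `save_mds`, `use_pigz`); the file names and sizes here are invented, and this is a sketch rather than documented API usage.

```python
# Hypothetical driver for the JSONL -> MDS conversion shown in convert/jsonl.py.
# File names are placeholders; save_mds/use_pigz are the helpers from mldataforge.utils.
from datasets import load_dataset

from mldataforge.utils import save_mds, use_pigz

jsonl_files = ["train-000.jsonl.gz"]   # assumed input shards
compression = "pigz"                   # compress finished shards externally with pigz

save_mds(
    load_dataset("json", data_files=jsonl_files, split="train"),
    output_dir="out/mds",
    processes=8,
    compression=compression,
    buf_size=2**24,
    pigz=use_pigz(compression),
    shard_size=2**26,                  # new in 0.1.4: cap the size of each MDS shard
)
```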
{mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/commands/convert/jsonl.py
@@ -18,7 +18,8 @@ def jsonl():
 @yes_option()
 @processes_option()
 @buf_size_option()
-def mds(output_dir, jsonl_files, compression, processes, overwrite, yes, buf_size):
+@shard_size_option()
+def mds(output_dir, jsonl_files, compression, processes, overwrite, yes, buf_size, shard_size):
     check_arguments(output_dir, overwrite, yes, jsonl_files)
     save_mds(
         load_dataset("json", data_files=jsonl_files, split="train"),
@@ -27,6 +28,7 @@ def mds(output_dir, jsonl_files, compression, processes, overwrite, yes, buf_siz
         compression=compression,
         buf_size=buf_size,
         pigz=use_pigz(compression),
+        shard_size=shard_size,
     )
 
 @jsonl.command()
{mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/commands/convert/parquet.py
@@ -34,7 +34,8 @@ def jsonl(output_file, parquet_files, compression, processes, overwrite, yes):
 @overwrite_option()
 @yes_option()
 @buf_size_option()
-def mds(output_dir, parquet_files, compression, processes, overwrite, yes, buf_size):
+@shard_size_option()
+def mds(output_dir, parquet_files, compression, processes, overwrite, yes, buf_size, shard_size):
     check_arguments(output_dir, overwrite, yes, parquet_files)
     save_mds(
         load_dataset("parquet", data_files=parquet_files, split="train"),
@@ -43,4 +44,5 @@ def mds(output_dir, parquet_files, compression, processes, overwrite, yes, buf_s
         compression=compression,
         buf_size=buf_size,
         pigz=use_pigz(compression),
+        shard_size=shard_size,
     )
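In both convert commands the new `--shard-size` value is passed through `save_mds` to the MDS writer, where it becomes the `size_limit` of each shard (see the `MDSWriter(..., size_limit=shard_size)` line in the utils.py hunk further down). A minimal sketch of that relationship, with an invented column layout and output directory:

```python
# Sketch only: shard_size ends up as MDSWriter's size_limit.
# The columns dict and output path are placeholders, not mldataforge defaults.
from streaming import MDSWriter

columns = {"id": "int", "text": "str"}   # assumed sample schema
with MDSWriter(out="out/mds", columns=columns, compression=None, size_limit=2**26) as writer:
    writer.write({"id": 0, "text": "hello"})
# With size_limit=2**26, a new .mds shard is started once a shard reaches ~64 MiB.
```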
mldataforge-0.1.4/mldataforge/commands/split.py
@@ -0,0 +1,78 @@
+import click
+from datasets import load_dataset
+
+from ..options import *
+from ..utils import *
+
+__all__ = ["split"]
+
+@click.group()
+def split():
+    pass
+
+@split.command()
+@click.argument("jsonl_files", type=click.Path(exists=True), required=True, nargs=-1)
+@prefix_option()
+@output_dir_option()
+@size_hint_option()
+@compression_option("infer", ["none", "infer", "pigz", "gzip", "bz2", "xz"])
+@processes_option()
+@overwrite_option()
+@yes_option()
+def jsonl(jsonl_files, prefix, output_dir, size_hint, compression, processes, overwrite, yes):
+    save_jsonl(
+        load_dataset("json", data_files=jsonl_files, split="train"),
+        output_file=f"{output_dir}/{prefix}{{part:04d}}.jsonl{extension(compression, jsonl_files[0])}",
+        compression=compression,
+        processes=processes,
+        size_hint=size_hint,
+        overwrite=overwrite,
+        yes=yes,
+    )
+
+@split.command()
+@click.argument("mds_directories", type=click.Path(exists=True), required=True, nargs=-1)
+@prefix_option()
+@output_dir_option()
+@size_hint_option()
+@compression_option(None, ['none', 'br', 'bz2', 'gzip', 'pigz', 'snappy', 'zstd'])
+@processes_option()
+@overwrite_option()
+@yes_option()
+@buf_size_option()
+@batch_size_option()
+@no_bulk_option()
+@shard_size_option()
+def mds(mds_directories, prefix, output_dir, size_hint, compression, processes, overwrite, yes, buf_size, batch_size, no_bulk, shard_size):
+    save_mds(
+        load_mds_directories(mds_directories, batch_size=batch_size, bulk=not no_bulk),
+        output_dir=f"{output_dir}/{prefix}{{part:04d}}",
+        processes=processes,
+        compression=compression,
+        buf_size=buf_size,
+        pigz=use_pigz(compression),
+        shard_size=shard_size,
+        size_hint=size_hint,
+        overwrite=overwrite,
+        yes=yes,
+    )
+
+@split.command()
+@click.argument("parquet_files", type=click.Path(exists=True), required=True, nargs=-1)
+@prefix_option()
+@output_dir_option()
+@size_hint_option()
+@compression_option("snappy", ["snappy", "gzip", "zstd"])
+@overwrite_option()
+@yes_option()
+@batch_size_option()
+def parquet(parquet_files, prefix, output_dir, size_hint, compression, overwrite, yes, batch_size):
+    save_parquet(
+        load_dataset("parquet", data_files=parquet_files, split="train"),
+        output_file=f"{output_dir}/{prefix}{{part:04d}}.parquet",
+        compression=compression,
+        batch_size=batch_size,
+        size_hint=size_hint,
+        overwrite=overwrite,
+        yes=yes,
+    )
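Since `split` is an ordinary click group, the new commands can be exercised directly from Python as well as from the command line. A small sketch using click's test runner follows; the input path is a placeholder, and only options whose flag names appear in this diff (`--output-dir`, `--prefix`, `--size-hint`) are spelled out, leaving the remaining options at their defaults:

```python
# Sketch: invoking the new `split jsonl` command programmatically.
# The input shard path is hypothetical and must exist (click.Path(exists=True)).
from click.testing import CliRunner

from mldataforge.commands.split import split

runner = CliRunner()
result = runner.invoke(split, [
    "jsonl",
    "data/part-0000.jsonl.gz",    # an existing (possibly already split) JSONL file
    "--output-dir", "out",
    "--prefix", "chunk-",
    "--size-hint", str(2**20),    # start a new part roughly every 1 MiB
])
print(result.exit_code, result.output)
```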
{mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/options.py
@@ -50,6 +50,17 @@ def compression_option(default, choices):
         help=f"Compress the output file (default: {default}).",
     )
 
+def output_dir_option(default="."):
+    """
+    Option for specifying the output directory.
+    """
+    return click.option(
+        "--output-dir",
+        default=default,
+        type=click.Path(exists=False),
+        help="Output directory.",
+    )
+
 def overwrite_option():
     """
     Option for specifying whether to overwrite existing files.
@@ -60,6 +71,16 @@ def overwrite_option():
         help="Overwrite existing path.",
     )
 
+def prefix_option(default="part-"):
+    """
+    Option for specifying the prefix for output files.
+    """
+    return click.option(
+        "--prefix",
+        default=default,
+        help=f"Prefix for output files (default: {default}).",
+    )
+
 def processes_option(default=64):
     """
     Option for specifying the number of processes to use.
@@ -70,6 +91,26 @@ def processes_option(default=64):
         help=f"Number of processes to use (default: {default}).",
     )
 
+def shard_size_option(default=2**26):
+    """
+    Option for specifying the shard size.
+    """
+    return click.option(
+        "--shard-size",
+        default=default,
+        help=f"Shard size for the dataset (default: {default}).",
+    )
+
+def size_hint_option(default=2**26):
+    """
+    Option for specifying the size hint.
+    """
+    return click.option(
+        "--size-hint",
+        default=default,
+        help=f"Size hint for the dataset (default: {default}).",
+    )
+
 def yes_option():
     """
     Option for specifying whether to assume yes to all prompts.
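Like the existing factories in options.py, each new helper returns a preconfigured `click.option` decorator, so commands share flag names and defaults. A toy example of how they compose (the `demo` command is hypothetical, not part of mldataforge):

```python
# Sketch: composing the option factories added above on a hypothetical command.
import click

from mldataforge.options import output_dir_option, prefix_option, shard_size_option, size_hint_option

@click.command()
@output_dir_option()    # --output-dir, default "."
@prefix_option()        # --prefix, default "part-"
@size_hint_option()     # --size-hint, default 2**26
@shard_size_option()    # --shard-size, default 2**26
def demo(output_dir, prefix, size_hint, shard_size):
    click.echo(f"{output_dir}/{prefix}0000 size_hint={size_hint} shard_size={shard_size}")

if __name__ == "__main__":
    demo()
```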
{mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/pigz.py
@@ -16,6 +16,7 @@ class PigzFile(object):
         self.encoding = encoding if self.is_text else None
         self._process = None
         self._fw = None
+        self.offset = 0
         args = ["pigz", "-p", str(self.processes), "-c"]
         if self.is_read:
             args.extend(("-d", self.path))
@@ -28,6 +29,7 @@ class PigzFile(object):
         assert self.is_read
         for line in self._process.stdout:
             assert isinstance(line, str) if self.is_text else isinstance(line, bytes)
+            self.offset += len(line)
             yield line
         self._process.wait()
         assert self._process.returncode == 0
@@ -39,6 +41,7 @@ class PigzFile(object):
         assert self._fw is not None
         assert isinstance(line, str) if self.is_text else isinstance(line, bytes)
         self._process.stdin.write(line)
+        self.offset += len(line)
 
     def close(self):
         if self._process:
@@ -52,7 +55,10 @@ class PigzFile(object):
             self._process = None
             self._fw.close()
             self._fw = None
-
+
+    def tell(self):
+        return self.offset
+
     def __enter__(self):
         return self
 
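Because PigzFile writes through a pigz subprocess, there is no underlying file object whose position could be queried; the new `offset` counter and `tell()` give callers such as `save_jsonl` a file-like size signal for deciding when to rotate to the next part. The same technique in isolation (a generic sketch, not the PigzFile API, whose constructor is not shown in this diff):

```python
import io

# Generic sketch: count bytes as they are written so tell() can report
# an approximate output size even when the real sink is a subprocess pipe.
class CountingWriter:
    def __init__(self, raw):
        self.raw = raw        # any object with write(), e.g. a pipe to pigz
        self.offset = 0

    def write(self, data):
        self.raw.write(data)
        self.offset += len(data)

    def tell(self):
        return self.offset

w = CountingWriter(io.BytesIO())
w.write(b'{"text": "hello"}\n')
assert w.tell() == 18
```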
{mldataforge-0.1.2 → mldataforge-0.1.4}/mldataforge/utils.py
@@ -19,6 +19,7 @@ __all__ = [
     "batch_iterable",
     "check_arguments",
     "confirm_overwrite",
+    "extension",
     "load_mds_directories",
     "save_jsonl",
     "save_mds",
@@ -36,8 +37,8 @@ def batch_iterable(iterable, batch_size):
     if batch:
         yield batch
 
-def check_arguments(output_path, overwrite, yes, input_paths):
-    if not input_paths:
+def check_arguments(output_path, overwrite, yes, input_paths=None):
+    if input_paths is not None and not input_paths:
         raise click.BadArgumentUsage("No input paths provided.")
     if os.path.exists(output_path):
         if os.path.isfile(output_path):
@@ -70,6 +71,24 @@ def _determine_compression(file_path, compression="infer"):
         compression = None
     return compression
 
+def extension(compression, file_path):
+    """Get the file extension for the given compression type."""
+    if compression == "infer":
+        compression = _infer_compression(file_path)
+    if compression in ("gzip", "pigz"):
+        return ".gz"
+    if compression == "bz2":
+        return ".bz2"
+    if compression == "xz":
+        return ".xz"
+    if compression == "zip":
+        return ".zip"
+    if compression == "zstd":
+        return ".zst"
+    if compression is None:
+        return ""
+    raise ValueError(f"Unsupported compression type: {compression}")
+
 def _infer_mds_encoding(value):
     """Determine the MDS encoding for a given value."""
     if isinstance(value, str):
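The `extension` helper normalizes a compression name (or one inferred from an input file) to the suffix that `split jsonl` appends to each part file name. Illustrative calls, assuming the behaviour shown above:

```python
# Expected results of extension() as defined in the hunk above (illustrative only).
from mldataforge.utils import extension

assert extension("pigz", "ignored.jsonl") == ".gz"
assert extension("bz2", "ignored.jsonl") == ".bz2"
assert extension(None, "ignored.jsonl") == ""
# With "infer", the suffix follows the input file's own compression:
assert extension("infer", "data.jsonl.gz") == ".gz"
```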
@@ -157,53 +176,88 @@ def _pigz_compress(input_file, output_file, processes=64, buf_size=2**24, keep=F
     if not quiet:
         print(f"Removed {input_file}")
 
-def save_jsonl(iterable, output_file, compression=None, processes=64):
+def save_jsonl(iterable, output_file, compression=None, processes=64, size_hint=None, overwrite=True, yes=True):
     compression = _determine_compression(output_file, compression)
-
-
-
+    f = None
+    part = 0
+    for item in tqdm(iterable, desc="Writing to JSONL", unit="sample"):
+        if f is None:
+            part_file = output_file.format(part=part)
+            check_arguments(part_file, overwrite, yes)
+            f = _open_jsonl(part_file, mode="wb", compression=compression, processes=processes)
+        f.write(f"{json.dumps(item)}\n".encode("utf-8"))
+        if size_hint is not None and f.tell() >= size_hint:
+            f.close()
+            part += 1
+            f = None
+    if f is not None:
+        f.close()
 
-def save_mds(it, output_dir, processes=64, compression=None, buf_size=2**24, pigz=False):
+def save_mds(it, output_dir, processes=64, compression=None, buf_size=2**24, pigz=False, shard_size=None, size_hint=None, overwrite=True, yes=True):
     if compression == "none" or pigz:
         compression = None
     if compression == "gzip":
         compression = "gz"
     writer = None
+    part = 0
+    files = []
     for sample in tqdm(it, desc="Writing to MDS", unit="sample"):
         if writer is None:
+            part_dir = output_dir.format(part=part)
+            check_arguments(part_dir, overwrite, yes)
+            files.append(part_dir)
             columns = {key: _infer_mds_encoding(value) for key, value in sample.items()}
-            writer = MDSWriter(out=
+            writer = MDSWriter(out=part_dir, columns=columns, compression=compression, size_limit=shard_size)
+            offset = 0
+            prev = writer.new_shard_size
         writer.write(sample)
-
+        offset += (writer.new_shard_size - prev) if prev < writer.new_shard_size else writer.new_shard_size
+        if size_hint is not None and offset >= size_hint:
+            writer.finish()
+            part += 1
+            writer = None
+    if writer is not None:
+        writer.finish()
     if pigz:
-
-
-
-
-
-
-
-
-
-
-
-
-"
-
-
-
-
-
-
-
+        for output_dir in files:
+            index_path = os.path.join(output_dir, "index.json")
+            index = json.load(open(index_path, "rt"))
+            name2info = {shard["raw_data"]["basename"]: shard for shard in index["shards"]}
+            file_names = [file for file in os.listdir(output_dir) if file.endswith(".mds")]
+            assert set(file_names) == set(name2info.keys())
+            for file_name in tqdm(file_names, desc="Compressing with pigz", unit="file"):
+                compressed_file_name = file_name + ".gz"
+                file_path = os.path.join(output_dir, file_name)
+                compressed_file_path = os.path.join(output_dir, compressed_file_name)
+                _pigz_compress(file_path, compressed_file_path, processes, buf_size=buf_size, keep=False, quiet=True)
+                name2info[file_name]["compression"] = "gz"
+                name2info[file_name]["zip_data"] = {
+                    "basename": compressed_file_name,
+                    "bytes": os.stat(compressed_file_path).st_size,
+                    "hashes": {},
+                }
+            json.dump(index, open(index_path, "wt"))
+            print(f"Compressed {output_dir} with pigz")
+
+def save_parquet(it, output_file, compression=None, batch_size=2**16, size_hint=None, overwrite=True, yes=True):
     writer = None
+    part = 0
     it = tqdm(it, desc="Writing to Parquet", unit="sample")
     for batch in batch_iterable(it, batch_size):
         table = pa.Table.from_pylist(batch)
         if writer is None:
-
+            part_file = output_file.format(part=part)
+            check_arguments(part_file, overwrite, yes)
+            writer = pq.ParquetWriter(part_file, table.schema, compression=compression)
+            offset = 0
         writer.write_table(table)
-
+        offset += table.nbytes
+        if size_hint is not None and offset >= size_hint:
+            writer.close()
+            part += 1
+            writer = None
+    if writer is not None:
+        writer.close()
 
 def use_pigz(compression):
     """Determine if pigz should be used based on the compression type."""
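All three save_* functions now share one rotation pattern: the output name is a format template with a `{part:04d}` slot, a running byte count approximates the current part's size, and once it crosses `size_hint` the writer is closed and the next part begins. A self-contained sketch of that pattern with plain local files (separate from the mldataforge implementations above):

```python
import json

def rotate_jsonl(items, template="part-{part:04d}.jsonl", size_hint=2**20):
    """Write items as JSON lines, starting a new part file once size_hint bytes are reached."""
    f, part = None, 0
    for item in items:
        if f is None:
            f = open(template.format(part=part), "wb")   # next part in the sequence
        f.write(f"{json.dumps(item)}\n".encode("utf-8"))
        if size_hint is not None and f.tell() >= size_hint:
            f.close()
            part += 1
            f = None
    if f is not None:
        f.close()

# e.g. rotate_jsonl({"i": i, "text": "x" * 100} for i in range(100_000))
```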