mldataforge 0.0.2__tar.gz → 0.0.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mldataforge
3
- Version: 0.0.2
3
+ Version: 0.0.3
4
4
  Summary: swiss army knife of scripts for transforming and processing datasets for machine learning.
5
5
  Project-URL: Homepage, https://github.com/schneiderkamplab/mldataforge
6
6
  Project-URL: Bug Tracker, https://github.com/schneiderkamplab/mldataforge/issues
@@ -1,6 +1,7 @@
1
1
  import click
2
2
 
3
3
  from .jsonl import jsonl
4
+ from .mds import mds
4
5
 
5
6
  __all__ = ["parquet"]
6
7
 
@@ -9,3 +10,4 @@ def parquet():
9
10
  pass
10
11
 
11
12
  parquet.add_command(jsonl)
13
+ parquet.add_command(mds)
@@ -0,0 +1,44 @@
1
+ #!/usr/bin/env python
2
+ import click
3
+ import json
4
+ import os
5
+ from streaming import MDSWriter
6
+ from tqdm import tqdm
7
+
8
+ from ....utils import check_overwrite, infer_mds_encoding, load_parquet_files, pigz_compress, use_pigz
9
+
10
@click.command()
@click.argument('output_dir', type=click.Path(exists=False))
@click.argument('parquet_files', nargs=-1, type=click.Path(exists=True))
@click.option('--compression', type=click.Choice(['none', 'br', 'bz2', 'gzip', 'pigz', 'snappy', 'zstd'], case_sensitive=False), default=None, help='Compression type for the output dataset (default: None).')
@click.option("--processes", default=64, help="Number of processes to use for pigz compression (default: 64).")
@click.option("--overwrite", is_flag=True, help="Overwrite existing MDS directory.")
@click.option("--yes", is_flag=True, help="Assume yes to all prompts. Use with caution as it will remove entire directory trees without confirmation.")
@click.option("--buf-size", default=2**24, help=f"Buffer size for pigz compression (default: {2**24}).")
def mds(output_dir, parquet_files, processes, compression, overwrite, yes, buf_size):
    """Convert one or more Parquet files into an MDS dataset under OUTPUT_DIR.

    Writes every sample of the concatenated Parquet input to MDS shards,
    optionally compressing the resulting .mds files with external pigz.

    Raises:
        click.BadArgumentUsage: if no parquet files are given.
    """
    check_overwrite(output_dir, overwrite, yes)
    if not parquet_files:
        raise click.BadArgumentUsage("No parquet files provided.")
    ds = load_parquet_files(parquet_files)
    # Decide up front whether compression is delegated to external pigz;
    # in that case MDSWriter must produce uncompressed .mds shards.
    pigz = use_pigz(compression)
    sample = ds[0]
    if compression == "none" or pigz:
        compression = None
    if compression == "gzip":
        # MDSWriter expects the short form "gz" for gzip compression.
        compression = "gz"
    # Infer an MDS column encoding per field from the first sample.
    # NOTE(review): assumes all samples share the first sample's schema — confirm.
    columns = {key: infer_mds_encoding(value) for key, value in sample.items()}
    lines = 0
    with MDSWriter(out=output_dir, columns=columns, compression=compression) as writer:
        for item in tqdm(ds, desc="Processing samples", unit="sample"):
            writer.write(item)
            lines += 1
    print(f"Wrote {lines} lines from {len(parquet_files)} files to MDS files in {output_dir}")
    if pigz:
        # Post-process: compress each uncompressed shard with pigz in place
        # (keep=False removes the original .mds after compression).
        file_paths = []
        for file in os.listdir(output_dir):
            if file.endswith(".mds"):
                file_paths.append(os.path.join(output_dir, file))
        for file_path in tqdm(file_paths, desc="Compressing with pigz", unit="file"):
            pigz_compress(file_path, file_path + ".gz", processes, buf_size=buf_size, keep=False, quiet=True)
        print(f"Compressed {output_dir} with pigz")
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "mldataforge"
7
- version = "0.0.2"
7
+ version = "0.0.3"
8
8
  authors = [
9
9
  { name = "Peter Schneider-Kamp" }
10
10
  ]
File without changes
File without changes
File without changes