data-forager 0.1.2.tar.gz → 0.1.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {data_forager-0.1.2 → data_forager-0.1.4}/PKG-INFO +1 -1
  2. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/datasets/common.py +51 -0
  3. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/indexers/tokenization_indexer.py +98 -29
  4. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager.egg-info/PKG-INFO +1 -1
  5. {data_forager-0.1.2 → data_forager-0.1.4}/pyproject.toml +1 -1
  6. {data_forager-0.1.2 → data_forager-0.1.4}/LICENSE +0 -0
  7. {data_forager-0.1.2 → data_forager-0.1.4}/README.md +0 -0
  8. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/__init__.py +0 -0
  9. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/datasets/__init__.py +0 -0
  10. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/datasets/jsonl.py +0 -0
  11. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/datasets/tokens.py +0 -0
  12. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/index_stores/__init__.py +0 -0
  13. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/index_stores/common.py +0 -0
  14. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/index_stores/fs_based.py +0 -0
  15. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/indexers/__init__.py +0 -0
  16. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/indexers/jsonl_indexer.py +0 -0
  17. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/indexers/text_lines.py +0 -0
  18. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/sample_index.py +0 -0
  19. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/unzip_files.py +0 -0
  20. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager/utils.py +0 -0
  21. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager.egg-info/SOURCES.txt +0 -0
  22. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager.egg-info/dependency_links.txt +0 -0
  23. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager.egg-info/requires.txt +0 -0
  24. {data_forager-0.1.2 → data_forager-0.1.4}/data_forager.egg-info/top_level.txt +0 -0
  25. {data_forager-0.1.2 → data_forager-0.1.4}/setup.cfg +0 -0
  26. {data_forager-0.1.2 → data_forager-0.1.4}/tests/test_jsonl_indexing.py +0 -0
  27. {data_forager-0.1.2 → data_forager-0.1.4}/tests/test_tokenizing_indexing_jsonl.py +0 -0
--- data_forager-0.1.2/PKG-INFO
+++ data_forager-0.1.4/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: data-forager
-Version: 0.1.2
+Version: 0.1.4
 Summary: Enabling random access to large datasets on disk for PyTorch training and other use cases
 Author-email: Freddy Snijder <forager@visionscapers.com>
 License-Expression: MIT
--- data_forager-0.1.2/data_forager/datasets/common.py
+++ data_forager-0.1.4/data_forager/datasets/common.py
@@ -1,6 +1,8 @@
 import abc
 from typing import Optional, Union, Dict, List, Protocol, Any
 
+import numpy as np
+
 from basics.base import Base
 
 from data_forager.sample_index import SampleIndex, SampleLocation
@@ -128,3 +130,52 @@ class Dataset(Base, metaclass=abc.ABCMeta):
 
     def __del__(self):
         self._close_files()
+
+
+class SubsampledDataset:
+    """
+    Wrapper that provides a subsampled view of a dataset.
+
+    Randomly selects a subset of indices from the wrapped dataset, allowing
+    for faster iteration through epochs when testing or debugging.
+
+    :param dataset: The dataset to wrap (must support __len__ and __getitem__).
+    :param subsample_factor: Fraction of the dataset to use (must be between 0 and 1).
+    :param seed: Random seed for reproducibility. If None, sampling is random.
+    :param random_order: If False (default), indices are sorted for better disk
+        read locality. If True, indices are kept in random order, which can be
+        used as a randomizer.
+    """
+
+    def __init__(
+        self,
+        dataset,
+        subsample_factor: float,
+        seed: int | None = None,
+        random_order: bool = False,
+    ):
+        if not 0 < subsample_factor <= 1:
+            raise ValueError(
+                f"subsample_factor must be between 0 (exclusive) and 1 (inclusive), "
+                f"got {subsample_factor}"
+            )
+
+        self._dataset = dataset
+        self._subsample_factor = subsample_factor
+
+        n_full = len(dataset)
+        n_sub = int(subsample_factor * n_full)
+
+        # Sample indices without replacement
+        rng = np.random.default_rng(seed)
+        self._indices = rng.choice(n_full, size=n_sub, replace=False)
+
+        # Sort for cache locality unless random order is requested
+        if not random_order:
+            self._indices.sort()
+
+    def __len__(self) -> int:
+        return len(self._indices)
+
+    def __getitem__(self, idx: int):
+        return self._dataset[self._indices[idx]]
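
For context, here is a minimal usage sketch of the new SubsampledDataset wrapper, based only on the constructor and methods visible in the hunk above. The import path is assumed from the file location (data_forager/datasets/common.py); the class may also be re-exported elsewhere in the package.

```python
# Minimal sketch, assuming SubsampledDataset is importable from its module path.
from data_forager.datasets.common import SubsampledDataset

# Any object supporting __len__ and __getitem__ can be wrapped;
# a plain list stands in for a real data_forager dataset here.
full_dataset = list(range(100_000))

# Keep 1% of the samples, with a fixed seed for reproducibility.
# Indices are sorted by default for better disk read locality.
subset = SubsampledDataset(full_dataset, subsample_factor=0.01, seed=42)

print(len(subset))  # -> 1000
print(subset[0])    # item at the smallest sampled index
```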
--- data_forager-0.1.2/data_forager/indexers/tokenization_indexer.py
+++ data_forager-0.1.4/data_forager/indexers/tokenization_indexer.py
@@ -1,5 +1,6 @@
 from typing import Callable, List, Optional
 
+import logging
 import os
 from pathlib import Path
 
@@ -8,7 +9,11 @@ import json
 import numpy as np
 
 from basics.base import Base
+from basics.logging import get_logger
 
+module_logger = get_logger(os.path.basename(__file__))
+
+from data_forager.index_stores.common import IndexStoreInterface
 from data_forager.index_stores.fs_based import IndexStore as FSBasedIndexStore
 from data_forager.indexers.text_lines import SampleData, FileTextLinesIndexer, SampleGeneratorInterface
 from data_forager.utils import find_files_recursive, natural_sort
@@ -24,61 +29,125 @@ def get_text_from_jsonl(jsonl_bytes: bytes, text_key: str = "text", text_encodin
 
 
 def create_tokenize_and_index_jsonl_text_func(
-        input_base_path: str,
         tokenizer_func: TokenizerFunc,
         eos_idx: int,
+        input_base_path: Optional[str] = None,
+        input_file_paths: Optional[List[str]] = None,
+        output_base_path: Optional[str] = None,
+        index_store: Optional[IndexStoreInterface] = None,
         process_text_line_func: Optional[ProcessTextLineFunc] = None,
+        logger: Optional[logging.Logger] = None,
         name: Optional[str] = None,
         **sample_generator_kwargs,
 ) -> FileTextLinesIndexer:
     """
-    Create function to:
-    * Tokenize text from input JSONL objects, loaded from files at input_base_path (recursively),
-    * Store the token data in bin files under folder "tokenized-samples" in input_base_path
-    * Store index data under folder "index" in input_base_path
+    Create a pipeline to tokenize text from JSONL files and create an index for random access.
+
+    The pipeline:
+    * Tokenizes text from input JSONL objects
+    * Stores the token data in bin files under "tokenized-samples" folder
+    * Stores index data under "index" folder
 
     Usage:
-        # Create pipeline to tokenize text from input JSONL objects and index the token samples
+        ```python
         import tiktoken
 
         enc = tiktoken.get_encoding("gpt2")
         def tokenize_text(text: str) -> List[int]:
-            return tiktoken.enc.encode_ordinary(text)
+            return enc.encode_ordinary(text)
+
+        # Option 1: Scan directory for JSONL files, output to same directory
+        indexer = create_tokenize_and_index_jsonl_text_func(
+            tokenizer_func=tokenize_text,
+            eos_idx=enc.eot_token,
+            input_base_path='./data',
+            sample_size=1024,
+        )
 
-        tokenize_and_index_jsonl_text_func = create_jsonl_text_tokenization_and_indexing_pipeline(
-            input_base_path='.',
+        # Option 2: Explicit input files and output path
+        indexer = create_tokenize_and_index_jsonl_text_func(
             tokenizer_func=tokenize_text,
-            sample_size=1024
+            eos_idx=enc.eot_token,
+            input_file_paths=['./data/train.jsonl'],
+            output_base_path='./output',
+            sample_size=1024,
         )
 
-        # Start tokenization and indexing
-        tokenize_and_index_jsonl_text_func()
+        # Run tokenization and indexing
+        indexer()
+        ```
 
-    :param input_base_path: Path to directory containing JSONL files (searched recursively).
     :param tokenizer_func: Function used to tokenize text.
-    :param eos_idx: EOS token index, known by the used Tokenizer
+    :param eos_idx: EOS token index, known by the used Tokenizer.
+    :param input_base_path: Path to directory containing JSONL files (searched recursively).
+        Used as fallback for output if `output_base_path` is not provided.
+    :param input_file_paths: List of file paths to process. If provided, these are used
+        instead of scanning `input_base_path` for JSONL files.
+    :param output_base_path: Base path for output (index and tokenized samples).
+        If not provided, `input_base_path` is used.
+    :param index_store: Index store to use. If provided, this is used instead of
+        creating a new FSBasedIndexStore.
     :param process_text_line_func: Function used to process text lines.
        By default, this converts input JSON lines to dicts and returns the "text" field.
        See function get_text_from_jsonl().
-    :param sample_generator_kwargs: Other kwargs passed to TokenizedSampleGenerator.
-    :param name: Optional: name of the indexer to create, used for logging purposes
+    :param logger: Logger to use. If not provided, uses module logger.
+    :param name: Name of the indexer, used for logging purposes.
+    :param sample_generator_kwargs: Other kwargs passed to TokenizedSampleGenerator
+        (e.g., sample_size, token_dtype, base_output_path).
+
+    :raises ValueError: If both `input_base_path` and `input_file_paths` are None.
+    :raises ValueError: If `index_store` is None and both `output_base_path` and
+        `input_base_path` are None.
 
-    :return: FileTextLinesIndexer instance that can be used to tokenize and index text from jsonl objects, from
-        JSONL files at input_base_path (recursively)
+    :return: FileTextLinesIndexer instance that can be called to run tokenization
+        and indexing.
     """
-    if process_text_line_func is None:
-        process_text_line_func=get_text_from_jsonl
+    if logger is None:
+        logger = module_logger
 
-    index_store = FSBasedIndexStore(
-        base_path=input_base_path,
-    )
-    input_file_paths = find_files_recursive(
-        input_base_path,
-        extension_patterns=['*.jsonl', '*.JSONL']
-    )
+    # Validate input source
+    if input_base_path is None and input_file_paths is None:
+        raise ValueError(
+            "Either input_base_path or input_file_paths must be provided"
+        )
+
+    # Determine output base path
+    effective_output_base_path = output_base_path or input_base_path
 
-    # Assuming numbered files
-    input_file_paths = natural_sort(input_file_paths)
+    # Validate output destination
+    if index_store is None and effective_output_base_path is None:
+        raise ValueError(
+            "Either index_store, output_base_path, or input_base_path must be provided "
+            "to determine where to store the index"
+        )
+
+    logger.info(f"Output base path: {effective_output_base_path}")
+
+    if process_text_line_func is None:
+        process_text_line_func = get_text_from_jsonl
+
+    if index_store is None:
+        index_store = FSBasedIndexStore(
+            base_path=effective_output_base_path,
+        )
+
+    if input_file_paths is None:
+        logger.info(f"Scanning for JSONL files in: {input_base_path}")
+        input_file_paths = find_files_recursive(
+            input_base_path,
+            extension_patterns=['*.jsonl', '*.JSONL']
+        )
+        # Assuming numbered files
+        input_file_paths = natural_sort(input_file_paths)
+        logger.info(f"Found {len(input_file_paths)} JSONL file(s)")
+
+    # Set default base_output_path for tokenized samples if not provided in kwargs
+    if 'base_output_path' not in sample_generator_kwargs:
+        default_base_output_path = os.path.join(
            effective_output_base_path, "tokenized-samples"
+        )
+        logger.info(f"Tokenized samples output path: {default_base_output_path}")
+        sample_generator_kwargs['base_output_path'] = default_base_output_path
 
     sample_generator = TokenizedSampleGenerator(
         process_text_line_func=process_text_line_func,
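
The new index_store and input_file_paths parameters allow supplying a pre-built store and an explicit file list instead of letting the function derive both from input_base_path. Below is a hedged sketch of that code path, using only the signatures visible in this diff; the module import paths and the example paths/values are assumptions for illustration.

```python
# Sketch only: import paths assumed from the package layout shown in this diff.
import tiktoken
from data_forager.index_stores.fs_based import IndexStore as FSBasedIndexStore
from data_forager.indexers.tokenization_indexer import create_tokenize_and_index_jsonl_text_func

enc = tiktoken.get_encoding("gpt2")

# Reuse an existing on-disk index store instead of having one created implicitly.
store = FSBasedIndexStore(base_path="./output")

indexer = create_tokenize_and_index_jsonl_text_func(
    tokenizer_func=lambda text: enc.encode_ordinary(text),
    eos_idx=enc.eot_token,
    input_file_paths=["./data/train.jsonl"],  # explicit files, no directory scan
    output_base_path="./output",              # tokenized samples go under ./output/tokenized-samples
    index_store=store,
    sample_size=1024,
)
indexer()  # runs tokenization and indexing
```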
--- data_forager-0.1.2/data_forager.egg-info/PKG-INFO
+++ data_forager-0.1.4/data_forager.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: data-forager
-Version: 0.1.2
+Version: 0.1.4
 Summary: Enabling random access to large datasets on disk for PyTorch training and other use cases
 Author-email: Freddy Snijder <forager@visionscapers.com>
 License-Expression: MIT
--- data_forager-0.1.2/pyproject.toml
+++ data_forager-0.1.4/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "data-forager"
-version = "0.1.2"
+version = "0.1.4"
 description = "Enabling random access to large datasets on disk for PyTorch training and other use cases"
 readme = "README.md"
 license = "MIT"