rxnn 0.1.75__py3-none-any.whl → 0.1.76__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rxnn/training/dataset.py
CHANGED
@@ -16,6 +16,8 @@ class BaseDataset(Dataset):
             hf_field: str = 'text',
             cache_tokenized: bool = False,
             cache_remove_text: bool = True,
+            tokenize_in_background: bool = False,
+            batch_size: int = 1,
             *args,
             **kwargs
     ):
@@ -28,6 +30,17 @@ class BaseDataset(Dataset):
         self.cache_tokenized = cache_tokenized
         self.cache_remove_text = cache_remove_text
         self.inputs = []
+        self.is_txt_list = isinstance(self.texts, list)
+        self.tokenize_in_background = tokenize_in_background
+        self.bg_next = []
+        self.bg_queue = None
+        self.batch_size = batch_size
+        self.last_idx = 0
+        if tokenize_in_background:
+            for i in range(self.batch_size):
+                self.bg_next.append(self.get_tokenized_text(i))
+            self.last_idx = self.batch_size - 1
+
 
     def __len__(self):
         return len(self.texts if not self.is_pre_tokenized else self.inputs)
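The two new constructor arguments enable an experimental background-tokenization mode that pre-tokenizes a lookahead window of batch_size items at startup. A minimal usage sketch (not part of the diff; the leading positional arguments and their names are assumptions inferred from the attributes self.texts, self.tokenizer and self.max_seq_len referenced in the hunks below):

    from transformers import AutoTokenizer
    from rxnn.training.dataset import BaseDataset

    tokenizer = AutoTokenizer.from_pretrained('gpt2')
    texts = ['first example', 'second example', 'third example', 'fourth example']

    # batch_size controls how many items are tokenized ahead of time;
    # tokenize_in_background=True fills that lookahead window in __init__.
    dataset = BaseDataset(
        texts,
        tokenizer,
        max_seq_len=256,
        hf_field='text',
        tokenize_in_background=True,  # new in 0.1.76
        batch_size=2,                 # new in 0.1.76
    )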
@@ -35,10 +48,50 @@ class BaseDataset(Dataset):
     def get_tokenized_text(self, idx: int, txt: str = None):
         if self.is_pre_tokenized:
             return self.inputs[idx]
+        elif self.tokenize_in_background:
+            if idx == self.last_idx - self.batch_size:
+                if self.bg_queue is not None:
+                    self.bg_next = self.bg_queue
+                    self.bg_queue = None
+                # TODO: schedule tokenizing a batch in background
+            elif idx == self.last_idx:
+                item = self.bg_next[idx]
+                self.bg_next = []
+                return item
+
+            if idx <= self.last_idx:
+                if self.bg_queue is not None:
+                    self.bg_next = self.bg_queue
+                    self.bg_queue = None
+
+                new_idx = idx - (self.last_idx - self.batch_size)
+                if new_idx in self.bg_next:
+                    return self.bg_next[new_idx]
+                else:
+                    if self.is_txt_list:
+                        text = self.texts[idx]
+                    else:
+                        text = self.texts[idx][self.hf_field]
+
+                    inputs = self.tokenizer(
+                        text,
+                        max_length=self.max_seq_len,
+                        truncation=True,
+                        padding='max_length',
+                        return_tensors='pt',
+                        return_attention_mask=True
+                    )
+                    if not (inputs['input_ids'][0] < self.tokenizer.vocab_size).all():
+                        inputs['input_ids'][0][
+                            (inputs['input_ids'][0] >= self.tokenizer.vocab_size)] = self.tokenizer.unk_token_id
+                    if not (inputs['input_ids'][0] >= 0).all():
+                        inputs['input_ids'][0][inputs['input_ids'][0] < 0] = self.tokenizer.unk_token_id
+
+                    return inputs
         else:
-            if txt:
+            if txt is not None:
                 text = txt
-            elif
+            elif self.is_txt_list:
                 text = self.texts[idx]
             else:
                 text = self.texts[idx][self.hf_field]
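The new background path tokenizes on demand and adds a guard that remaps any token id outside the range [0, vocab_size) to the tokenizer's unk_token_id before the encoded item is returned. A standalone illustration of that masking pattern (plain PyTorch with made-up values, not the library's code):

    import torch

    vocab_size = 50
    unk_token_id = 3
    input_ids = torch.tensor([5, 62, -1, 7])  # 62 and -1 fall outside [0, vocab_size)

    # Same boolean-mask assignment as in the hunk above.
    input_ids[input_ids >= vocab_size] = unk_token_id
    input_ids[input_ids < 0] = unk_token_id
    print(input_ids)  # tensor([5, 3, 3, 7])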
@@ -92,22 +145,14 @@ class BaseDataset(Dataset):
         """
         if not self.is_pre_tokenized:
             num_texts = len(self.texts)
-
-
-
-
-            self.
-
-
-
-                    if verbose and index % log_interval == 0:
-                        print(f'Processed {index + 1}/{num_texts}')
-            else:
-                for index in range(num_texts):
-                    self.inputs.append(self.get_tokenized_text(index))
-                    del self.texts[index]
-                    if verbose and index % log_interval == 0:
-                        print(f'Processed {index + 1}/{num_texts}')
+            txts = self.texts if self.is_txt_list else self.texts.to_list()
+            del self.texts
+            self.texts = None
+            for index in range(num_texts):
+                item = txts.pop() if self.is_txt_list else txts.pop()[self.hf_field]
+                self.inputs.append(self.get_tokenized_text(index, txt=item))
+                if verbose and index % log_interval == 0:
+                    print(f'Processed {index + 1}/{num_texts}')
         self.is_pre_tokenized = True
 
 
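The reworked eager path now materializes the texts once (calling to_list() on a Hugging Face dataset when the input is not a plain list), releases self.texts, and passes each raw string directly to get_tokenized_text via the new txt argument. A usage sketch, assuming the enclosing method is named pre_tokenize (consistent with the is_pre_tokenized flag it sets) and that verbose and log_interval are its parameters as the hunk suggests:

    # Hypothetical call; dataset is a BaseDataset as constructed above.
    dataset.pre_tokenize(verbose=True, log_interval=1000)
    assert dataset.is_pre_tokenized
    item = dataset.get_tokenized_text(0)  # now served from the cached self.inputs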
rxnn-0.1.75.dist-info/RECORD → rxnn-0.1.76.dist-info/RECORD
RENAMED
@@ -12,7 +12,7 @@ rxnn/training/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rxnn/training/base.py,sha256=xPMA2Bg9-oUZvSZg67ls2p7Gk9pZ9IHUiIJwUzSe2K8,11766
 rxnn/training/bml.py,sha256=S1ZaXTybzeJH7uVFamCr4TPl2bLyZ5xmn_lSsjThTiM,19162
 rxnn/training/callbacks.py,sha256=_YfMKY_eFdc-tubhO9nYH2PXDZDQwlSI74FVOoCXpQg,22108
-rxnn/training/dataset.py,sha256=
+rxnn/training/dataset.py,sha256=Nqt1uRi3i89R7blS2vfuZ-fXY-OTMPTPlKjlffentOA,14761
 rxnn/training/scheduler.py,sha256=ow6oALzWjWQmHSpcJEjv6tg4g4CDMvr73TypxfcefMc,712
 rxnn/training/tokenizer.py,sha256=umaLByMBx_NMrQElA45HLm9gkuzyKWDTFaKVd-CjXl0,8344
 rxnn/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -25,7 +25,7 @@ rxnn/transformers/moe.py,sha256=j6jEx6Ip0zttlUZKKn82azxo95lkLZs-H2GLSMD88hY,5859
 rxnn/transformers/positional.py,sha256=2l38RS0Dini3f6Z3LUHr3XwWzg1UK7fO2C6wazWDAYU,4292
 rxnn/transformers/sampler.py,sha256=poWBpxg1iuK5gEJtxHkk5VVfS9V48hs2Olqdhy_Gw8c,6548
 rxnn/utils.py,sha256=d5U8i5ukovgDyqiycc2AoxObTz_eF_bgo2MKvdtJ98s,467
-rxnn-0.1.
-rxnn-0.1.
-rxnn-0.1.
-rxnn-0.1.
+rxnn-0.1.76.dist-info/LICENSE,sha256=C8coDFIUYuOcke4JLPwTqahQUCyXyGq6WOaigOkx8tY,11275
+rxnn-0.1.76.dist-info/METADATA,sha256=Tf_ZnSlGebQalDujkooJ4p2MGv7_ff6uHTSORzxQ3Ck,16579
+rxnn-0.1.76.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+rxnn-0.1.76.dist-info/RECORD,,
File without changes
File without changes