rxnn 0.1.74__tar.gz → 0.1.76__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {rxnn-0.1.74 → rxnn-0.1.76}/PKG-INFO +1 -1
  2. {rxnn-0.1.74 → rxnn-0.1.76}/pyproject.toml +1 -1
  3. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/training/dataset.py +58 -6
  4. {rxnn-0.1.74 → rxnn-0.1.76}/LICENSE +0 -0
  5. {rxnn-0.1.74 → rxnn-0.1.76}/README.md +0 -0
  6. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/__init__.py +0 -0
  7. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/experimental/__init__.py +0 -0
  8. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/experimental/attention.py +0 -0
  9. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/experimental/models.py +0 -0
  10. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/experimental/moe.py +0 -0
  11. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/memory/__init__.py +0 -0
  12. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/memory/norm.py +0 -0
  13. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/memory/stm.py +0 -0
  14. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/rxt/__init__.py +0 -0
  15. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/rxt/models.py +0 -0
  16. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/training/__init__.py +0 -0
  17. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/training/base.py +0 -0
  18. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/training/bml.py +0 -0
  19. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/training/callbacks.py +0 -0
  20. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/training/scheduler.py +0 -0
  21. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/training/tokenizer.py +0 -0
  22. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/transformers/__init__.py +0 -0
  23. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/transformers/attention.py +0 -0
  24. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/transformers/ff.py +0 -0
  25. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/transformers/layers.py +0 -0
  26. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/transformers/mask.py +0 -0
  27. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/transformers/models.py +0 -0
  28. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/transformers/moe.py +0 -0
  29. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.74 → rxnn-0.1.76}/src/rxnn/utils.py +0 -0
--- rxnn-0.1.74/PKG-INFO
+++ rxnn-0.1.76/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.74
+Version: 0.1.76
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
--- rxnn-0.1.74/pyproject.toml
+++ rxnn-0.1.76/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.1.74"
+version = "0.1.76"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
--- rxnn-0.1.74/src/rxnn/training/dataset.py
+++ rxnn-0.1.76/src/rxnn/training/dataset.py
@@ -16,6 +16,8 @@ class BaseDataset(Dataset):
             hf_field: str = 'text',
             cache_tokenized: bool = False,
             cache_remove_text: bool = True,
+            tokenize_in_background: bool = False,
+            batch_size: int = 1,
             *args,
             **kwargs
     ):
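
The two new constructor options enable eager, batch-wise tokenization: when tokenize_in_background is set, the constructor immediately tokenizes the first batch_size items (see the next hunk). A minimal construction sketch, assuming an illustrative Hugging Face tokenizer and a plain list of texts (BaseDataset is normally used through its task-specific subclasses in rxnn.training.dataset):

    from transformers import AutoTokenizer
    from rxnn.training.dataset import BaseDataset

    # Illustrative tokenizer choice; any PreTrainedTokenizer should do.
    tokenizer = AutoTokenizer.from_pretrained('gpt2')

    dataset = BaseDataset(
        ['first example text', 'second example text'],  # plain list or HF dataset
        tokenizer,
        max_seq_len=256,
        tokenize_in_background=True,  # new in 0.1.76: pre-tokenizes the first batch eagerly
        batch_size=1,                 # new in 0.1.76: items tokenized per background batch
    )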
@@ -28,6 +30,17 @@ class BaseDataset(Dataset):
         self.cache_tokenized = cache_tokenized
         self.cache_remove_text = cache_remove_text
         self.inputs = []
+        self.is_txt_list = isinstance(self.texts, list)
+        self.tokenize_in_background = tokenize_in_background
+        self.bg_next = []
+        self.bg_queue = None
+        self.batch_size = batch_size
+        self.last_idx = 0
+        if tokenize_in_background:
+            for i in range(self.batch_size):
+                self.bg_next.append(self.get_tokenized_text(i))
+            self.last_idx = self.batch_size - 1
+
 
     def __len__(self):
         return len(self.texts if not self.is_pre_tokenized else self.inputs)
@@ -35,10 +48,50 @@ class BaseDataset(Dataset):
     def get_tokenized_text(self, idx: int, txt: str = None):
         if self.is_pre_tokenized:
             return self.inputs[idx]
+        elif self.tokenize_in_background:
+            if idx == self.last_idx - self.batch_size:
+                if self.bg_queue is not None:
+                    self.bg_next = self.bg_queue
+                    self.bg_queue = None
+                # TODO: schedule tokenizing a batch in background
+            elif idx == self.last_idx:
+                item = self.bg_next[idx]
+                self.bg_next = []
+                return item
+
+            if idx <= self.last_idx:
+                if self.bg_queue is not None:
+                    self.bg_next = self.bg_queue
+                    self.bg_queue = None
+
+                new_idx = idx - (self.last_idx - self.batch_size)
+                if new_idx in self.bg_next:
+                    return self.bg_next[new_idx]
+                else:
+                    if self.is_txt_list:
+                        text = self.texts[idx]
+                    else:
+                        text = self.texts[idx][self.hf_field]
+
+                    inputs = self.tokenizer(
+                        text,
+                        max_length=self.max_seq_len,
+                        truncation=True,
+                        padding='max_length',
+                        return_tensors='pt',
+                        return_attention_mask=True
+                    )
+                    if not (inputs['input_ids'][0] < self.tokenizer.vocab_size).all():
+                        inputs['input_ids'][0][
+                            (inputs['input_ids'][0] >= self.tokenizer.vocab_size)] = self.tokenizer.unk_token_id
+                    if not (inputs['input_ids'][0] >= 0).all():
+                        inputs['input_ids'][0][inputs['input_ids'][0] < 0] = self.tokenizer.unk_token_id
+
+                    return inputs
         else:
-            if txt:
+            if txt is not None:
                 text = txt
-            elif isinstance(self.texts, list):
+            elif self.is_txt_list:
                 text = self.texts[idx]
             else:
                 text = self.texts[idx][self.hf_field]
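
Two details of the new branch are worth noting: the actual background scheduling is still a TODO (only the eager first batch from the constructor is consumed), and every tokenized item passes through an out-of-vocabulary guard that replaces any token id outside [0, vocab_size) with unk_token_id. A self-contained sketch of that guard pattern, with illustrative values:

    import torch

    vocab_size = 8      # illustrative vocabulary size
    unk_token_id = 0    # illustrative unk id
    input_ids = torch.tensor([3, 7, 9, -2])  # 9 and -2 fall outside [0, vocab_size)

    # Mirror the two checks above: first clamp ids >= vocab_size, then negative ids.
    if not (input_ids < vocab_size).all():
        input_ids[input_ids >= vocab_size] = unk_token_id
    if not (input_ids >= 0).all():
        input_ids[input_ids < 0] = unk_token_id

    print(input_ids.tolist())  # [3, 7, 0, 0]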
@@ -76,7 +129,7 @@ class BaseDataset(Dataset):
         self.texts = self.texts[0:split_point] if not from_start else self.texts[split_point:-1]
         return self.__class__(subset, self.tokenizer, max_seq_len=self.max_seq_len, hf_field=self.hf_field, **kwargs)
 
-    def pre_tokenize(self, verbose: bool = False, log_interval: int = 10_000):
+    def pre_tokenize(self, verbose: bool = False, log_interval: int = 10_000, map_hf_ds_to_list: bool = True):
         """
         Pre-tokenizes all the items in the dataset, for faster training. Training with pre-tokenized
         dataset could be even 2x faster.
@@ -92,12 +145,11 @@ class BaseDataset(Dataset):
         """
         if not self.is_pre_tokenized:
             num_texts = len(self.texts)
-            is_txt_list = isinstance(self.texts, list)
-            txts = self.texts if is_txt_list else self.texts.to_list()
+            txts = self.texts if self.is_txt_list else self.texts.to_list()
             del self.texts
             self.texts = None
             for index in range(num_texts):
-                item = txts.pop() if is_txt_list else txts.pop()[self.hf_field]
+                item = txts.pop() if self.is_txt_list else txts.pop()[self.hf_field]
                 self.inputs.append(self.get_tokenized_text(index, txt=item))
                 if verbose and index % log_interval == 0:
                     print(f'Processed {index + 1}/{num_texts}')
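
For completeness, a usage sketch of the extended pre_tokenize signature. This hunk does not show how map_hf_ds_to_list is consumed, so the keyword is passed here by name only:

    # Pre-tokenize the whole dataset once up front; per the docstring above,
    # training on a pre-tokenized dataset can be up to ~2x faster.
    dataset.pre_tokenize(verbose=True, log_interval=10_000, map_hf_ds_to_list=True)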