rxnn 0.1.71__tar.gz → 0.1.72__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {rxnn-0.1.71 → rxnn-0.1.72}/PKG-INFO +1 -1
  2. {rxnn-0.1.71 → rxnn-0.1.72}/pyproject.toml +1 -1
  3. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/training/dataset.py +16 -2
  4. {rxnn-0.1.71 → rxnn-0.1.72}/LICENSE +0 -0
  5. {rxnn-0.1.71 → rxnn-0.1.72}/README.md +0 -0
  6. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/__init__.py +0 -0
  7. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/experimental/__init__.py +0 -0
  8. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/experimental/attention.py +0 -0
  9. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/experimental/models.py +0 -0
  10. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/experimental/moe.py +0 -0
  11. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/memory/__init__.py +0 -0
  12. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/memory/norm.py +0 -0
  13. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/memory/stm.py +0 -0
  14. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/rxt/__init__.py +0 -0
  15. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/rxt/models.py +0 -0
  16. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/training/__init__.py +0 -0
  17. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/training/base.py +0 -0
  18. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/training/bml.py +0 -0
  19. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/training/callbacks.py +0 -0
  20. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/training/scheduler.py +0 -0
  21. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/training/tokenizer.py +0 -0
  22. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/transformers/__init__.py +0 -0
  23. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/transformers/attention.py +0 -0
  24. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/transformers/ff.py +0 -0
  25. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/transformers/layers.py +0 -0
  26. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/transformers/mask.py +0 -0
  27. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/transformers/models.py +0 -0
  28. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/transformers/moe.py +0 -0
  29. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.71 → rxnn-0.1.72}/src/rxnn/utils.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.71
+Version: 0.1.72
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.1.71"
+version = "0.1.72"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
@@ -75,16 +75,30 @@ class BaseDataset(Dataset):
         return self.__class__(subset, self.tokenizer, max_seq_len=self.max_seq_len, hf_field=self.hf_field, **kwargs)
 
     def pre_tokenize(self, verbose: bool = False, log_interval: int = 10_000):
+        """
+        Pre-tokenizes all the items in the dataset, for faster training. Training with pre-tokenized
+        dataset could be even 2x faster.
+
+        !! This method has extremely high memory usage, when used with HuggingFace datasets,
+        because of converting it to list. Additionally, for the most optimal performance,
+        pre-tokenized items are in reversed order - it shouldn't matter for training, as
+        items are shuffled then by DataLoader, but you should keep that in mind in case
+        of reproducibility.
+
+        :param(bool) verbose:
+        :param(int) log_interval: Interval of verbose logs
+        """
         if not self.is_pre_tokenized:
             num_texts = len(self.texts)
             txts = self.texts if isinstance(self.texts, list) else self.texts.to_list()
             del self.texts
+            self.texts = None
             for index in range(num_texts):
-                self.inputs.append(txts.pop())
+                self.inputs.append(txts.pop() if isinstance(txts, list) else txts.pop()[self.hf_field])
                 if verbose and index % log_interval == 0:
                     print(f'Processed {index + 1}/{num_texts}')
             self.is_pre_tokenized = True
-            self.texts = None
+
 
     @classmethod
     def from_hf_hub(
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes