rxnn 0.1.64__tar.gz → 0.1.66__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {rxnn-0.1.64 → rxnn-0.1.66}/PKG-INFO +1 -1
  2. {rxnn-0.1.64 → rxnn-0.1.66}/pyproject.toml +1 -1
  3. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/training/dataset.py +54 -18
  4. {rxnn-0.1.64 → rxnn-0.1.66}/LICENSE +0 -0
  5. {rxnn-0.1.64 → rxnn-0.1.66}/README.md +0 -0
  6. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/__init__.py +0 -0
  7. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/experimental/__init__.py +0 -0
  8. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/experimental/attention.py +0 -0
  9. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/experimental/models.py +0 -0
  10. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/experimental/moe.py +0 -0
  11. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/memory/__init__.py +0 -0
  12. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/memory/norm.py +0 -0
  13. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/memory/stm.py +0 -0
  14. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/rxt/__init__.py +0 -0
  15. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/rxt/models.py +0 -0
  16. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/training/__init__.py +0 -0
  17. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/training/base.py +0 -0
  18. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/training/bml.py +0 -0
  19. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/training/callbacks.py +0 -0
  20. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/training/scheduler.py +0 -0
  21. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/training/tokenizer.py +0 -0
  22. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/transformers/__init__.py +0 -0
  23. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/transformers/attention.py +0 -0
  24. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/transformers/ff.py +0 -0
  25. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/transformers/layers.py +0 -0
  26. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/transformers/mask.py +0 -0
  27. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/transformers/models.py +0 -0
  28. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/transformers/moe.py +0 -0
  29. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/utils.py +0 -0
{rxnn-0.1.64 → rxnn-0.1.66}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.64
+Version: 0.1.66
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
{rxnn-0.1.64 → rxnn-0.1.66}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "rxnn"
-version = "0.1.64"
+version = "0.1.66"
 description = "RxNN: Reactive Neural Networks Platform"
 
 license = "Apache-2.0"
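Both metadata files carry the same change: the version bump from 0.1.64 to 0.1.66. If useful, a quick standard-library check (not part of this diff) confirms which release is installed locally after upgrading:

from importlib.metadata import version

# Should match the new release once rxnn has been upgraded.
assert version("rxnn") == "0.1.66"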
{rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/training/dataset.py
@@ -14,6 +14,8 @@ class BaseDataset(Dataset):
         tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
         max_seq_len: int = 1024,
         hf_field: str = 'text',
+        cache_tokenized: bool = False,
+        cache_remove_text: bool = False,
         *args,
         **kwargs
     ):
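The two new constructor flags are opt-in and default to False, so existing callers are unaffected. A minimal sketch of enabling them on a dataset built from an in-memory list of strings (the tokenizer checkpoint and texts are placeholders; the caching behaviour itself is implemented in the next hunk):

from transformers import AutoTokenizer
from rxnn.training.dataset import BaseDataset

# Placeholder tokenizer and corpus, purely for illustration.
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
texts = ["first training document", "second training document"]

dataset = BaseDataset(
    texts,
    tokenizer,
    max_seq_len=256,
    cache_tokenized=True,    # keep each encoding after its first tokenization
    cache_remove_text=True,  # drop the raw texts once every item is cached
)

# Once every index has been tokenized at least once, the dataset switches
# to serving the cached encodings directly.
for idx in range(len(texts)):
    dataset.get_tokenized_text(idx)
assert dataset.is_pre_tokenized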
{rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/training/dataset.py
@@ -22,27 +24,60 @@ class BaseDataset(Dataset):
         self.max_seq_len = max_seq_len
         self.texts = texts
         self.hf_field = hf_field
+        self.is_pre_tokenized = False
+        self.cache_tokenized = cache_tokenized
+        self.cache_remove_text = cache_remove_text
+        self.inputs = [] if self.cache_tokenized else None
 
     def get_tokenized_text(self, idx: int):
-        if isinstance(self.texts, list):
-            text = self.texts[idx]
+        if self.is_pre_tokenized:
+            return self.inputs[idx]
         else:
-            text = self.texts[idx][self.hf_field]
-
-        inputs = self.tokenizer(
-            text,
-            max_length=self.max_seq_len,
-            truncation=True,
-            padding='max_length',
-            return_tensors='pt',
-            return_attention_mask=True
-        )
-        if not (inputs['input_ids'][0] < self.tokenizer.vocab_size).all():
-            inputs['input_ids'][0][(inputs['input_ids'][0] >= self.tokenizer.vocab_size)] = self.tokenizer.unk_token_id
-        if not (inputs['input_ids'][0] >= 0).all():
-            inputs['input_ids'][0][inputs['input_ids'][0] < 0] = self.tokenizer.unk_token_id
-
-        return inputs
+            if isinstance(self.texts, list):
+                text = self.texts[idx]
+            else:
+                text = self.texts[idx][self.hf_field]
+
+            inputs = self.tokenizer(
+                text,
+                max_length=self.max_seq_len,
+                truncation=True,
+                padding='max_length',
+                return_tensors='pt',
+                return_attention_mask=True
+            )
+            if not (inputs['input_ids'][0] < self.tokenizer.vocab_size).all():
+                inputs['input_ids'][0][(inputs['input_ids'][0] >= self.tokenizer.vocab_size)] = self.tokenizer.unk_token_id
+            if not (inputs['input_ids'][0] >= 0).all():
+                inputs['input_ids'][0][inputs['input_ids'][0] < 0] = self.tokenizer.unk_token_id
+
+            if self.cache_tokenized:
+                self.inputs.append(inputs)
+                if len(self.inputs) == len(self.texts):
+                    self.is_pre_tokenized = True
+                    if self.cache_remove_text:
+                        del self.texts
+                        self.texts = None
+
+            return inputs
+
+    def get_subset(self, size: float, from_start: bool = False, use_hf_select: bool = False, **kwargs) -> "BaseDataset":
+        split_point = int(len(self.texts) * ((1 - size) if not from_start else size))
+        if use_hf_select and not isinstance(self.texts, list):
+            subset = self.texts.select(range(split_point, len(self.texts)) if not from_start else range(split_point))
+            self.texts = self.texts.select(range(split_point) if not from_start else range(split_point, len(self.texts)))
+        else:
+            subset = self.texts[split_point:] if not from_start else self.texts[:split_point]
+            self.texts = self.texts[:split_point] if not from_start else self.texts[split_point:]
+        return self.__class__(subset, self.tokenizer, self.max_seq_len, self.hf_field, **kwargs)
+
+    def pre_tokenize(self, remove_texts: bool = True):
+        if not self.is_pre_tokenized:
+            self.inputs = list(map(lambda idx: self.get_tokenized_text(idx), range(len(self.texts))))
+            self.is_pre_tokenized = True
+            if remove_texts:
+                del self.texts
+                self.texts = None
 
     @classmethod
     def from_hf_hub(
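The same caching can be triggered eagerly with pre_tokenize(), and get_subset() splits off a held-out portion in place. A small sketch of combining both on a list-backed dataset (the texts, tokenizer checkpoint and 10% split size are illustrative only):

from transformers import AutoTokenizer
from rxnn.training.dataset import BaseDataset

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
texts = [f"training document number {i}" for i in range(1000)]

train_dataset = BaseDataset(texts, tokenizer, max_seq_len=256)

# Moves the last 10% of examples into a new dataset of the same class;
# the remaining 90% stay in train_dataset.
valid_dataset = train_dataset.get_subset(0.1)

# Tokenizes every example up front; with remove_texts=True (the default)
# the raw texts are released afterwards and indexing returns cached encodings.
train_dataset.pre_tokenize()
valid_dataset.pre_tokenize()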
{rxnn-0.1.64 → rxnn-0.1.66}/src/rxnn/training/dataset.py
@@ -132,6 +167,7 @@ class BaseDataset(Dataset):
         return cls(hf_dataset, tokenizer, max_seq_len=max_seq_len, hf_field=target_field, **kwargs)
 
 
+
 class JointLMDataset(BaseDataset):
     def __init__(
         self,