rxnn 0.1.66__tar.gz → 0.1.68__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {rxnn-0.1.66 → rxnn-0.1.68}/PKG-INFO +1 -1
  2. {rxnn-0.1.66 → rxnn-0.1.68}/pyproject.toml +1 -1
  3. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/training/dataset.py +9 -15
  4. {rxnn-0.1.66 → rxnn-0.1.68}/LICENSE +0 -0
  5. {rxnn-0.1.66 → rxnn-0.1.68}/README.md +0 -0
  6. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/__init__.py +0 -0
  7. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/experimental/__init__.py +0 -0
  8. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/experimental/attention.py +0 -0
  9. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/experimental/models.py +0 -0
  10. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/experimental/moe.py +0 -0
  11. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/memory/__init__.py +0 -0
  12. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/memory/norm.py +0 -0
  13. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/memory/stm.py +0 -0
  14. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/rxt/__init__.py +0 -0
  15. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/rxt/models.py +0 -0
  16. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/training/__init__.py +0 -0
  17. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/training/base.py +0 -0
  18. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/training/bml.py +0 -0
  19. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/training/callbacks.py +0 -0
  20. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/training/scheduler.py +0 -0
  21. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/training/tokenizer.py +0 -0
  22. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/transformers/__init__.py +0 -0
  23. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/transformers/attention.py +0 -0
  24. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/transformers/ff.py +0 -0
  25. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/transformers/layers.py +0 -0
  26. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/transformers/mask.py +0 -0
  27. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/transformers/models.py +0 -0
  28. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/transformers/moe.py +0 -0
  29. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/transformers/positional.py +0 -0
  30. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/transformers/sampler.py +0 -0
  31. {rxnn-0.1.66 → rxnn-0.1.68}/src/rxnn/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: rxnn
3
- Version: 0.1.66
3
+ Version: 0.1.68
4
4
  Summary: RxNN: Reactive Neural Networks Platform
5
5
  License: Apache-2.0
6
6
  Keywords: deep-learning,ai,machine-learning
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
4
4
 
5
5
  [tool.poetry]
6
6
  name = "rxnn"
7
- version = "0.1.66"
7
+ version = "0.1.68"
8
8
  description = "RxNN: Reactive Neural Networks Platform"
9
9
 
10
10
  license = "Apache-2.0"
@@ -15,7 +15,7 @@ class BaseDataset(Dataset):
15
15
  max_seq_len: int = 1024,
16
16
  hf_field: str = 'text',
17
17
  cache_tokenized: bool = False,
18
- cache_remove_text: bool = False,
18
+ cache_remove_text: bool = True,
19
19
  *args,
20
20
  **kwargs
21
21
  ):
@@ -29,6 +29,9 @@ class BaseDataset(Dataset):
29
29
  self.cache_remove_text = cache_remove_text
30
30
  self.inputs = [] if self.cache_tokenized else None
31
31
 
32
+ def __len__(self):
33
+ return len(self.texts if not self.is_pre_tokenized else self.inputs)
34
+
32
35
  def get_tokenized_text(self, idx: int):
33
36
  if self.is_pre_tokenized:
34
37
  return self.inputs[idx]
@@ -61,15 +64,15 @@ class BaseDataset(Dataset):
61
64
 
62
65
  return inputs
63
66
 
64
- def get_subset(self, size: float, from_start: bool = False, use_hf_select: bool = False, **kwargs) -> "BaseDataset":
67
+ def get_subset(self, size: float, from_start: bool = False, **kwargs) -> "BaseDataset":
65
68
  split_point = int(len(self.texts) * ((1 - size) if not from_start else size))
66
- if use_hf_select and not isinstance(self.texts, list):
69
+ if not isinstance(self.texts, list):
67
70
  subset = self.texts.select(range(split_point, len(self.texts)) if not from_start else range(split_point))
68
71
  self.texts = self.texts.select(range(split_point) if not from_start else range(split_point, len(self.texts)))
69
72
  else:
70
- subset = self.texts[split_point:] if not from_start else self.texts[:split_point]
71
- self.texts = self.texts[:split_point] if not from_start else self.texts[split_point:]
72
- return self.__class__(subset, self.tokenizer, self.max_seq_len, self.hf_field, **kwargs)
73
+ subset = self.texts[split_point:-1] if not from_start else self.texts[0:split_point]
74
+ self.texts = self.texts[0:split_point] if not from_start else self.texts[split_point:-1]
75
+ return self.__class__(subset, self.tokenizer, max_seq_len=self.max_seq_len, hf_field=self.hf_field, **kwargs)
73
76
 
74
77
  def pre_tokenize(self, remove_texts: bool = True):
75
78
  if not self.is_pre_tokenized:
@@ -213,9 +216,6 @@ class JointLMDataset(BaseDataset):
213
216
  'attention_mask': attention_mask,
214
217
  }
215
218
 
216
- def __len__(self):
217
- return len(self.texts)
218
-
219
219
 
220
220
  class MaskedLMDataset(BaseDataset):
221
221
  def __init__(
@@ -253,9 +253,6 @@ class MaskedLMDataset(BaseDataset):
253
253
  'labels': labels
254
254
  }
255
255
 
256
- def __len__(self):
257
- return len(self.texts)
258
-
259
256
 
260
257
  class AutoregressiveLMDataset(BaseDataset):
261
258
  def __init__(
@@ -281,6 +278,3 @@ class AutoregressiveLMDataset(BaseDataset):
281
278
  'attention_mask': attention_mask,
282
279
  'targets': targets
283
280
  }
284
-
285
- def __len__(self):
286
- return len(self.texts)
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes