rxnn 0.1.63__py3-none-any.whl → 0.1.65__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rxnn/rxt/models.py CHANGED
@@ -31,9 +31,11 @@ class RxTAlphaComponentConfig(TypedDict):
     moe_top_k: int
     self_att_type: str
     cross_att_type: str
-    att_num_experts: int
-    att_num_query_experts: int
-    att_num_query_groups: int
+    att_experts: int
+    att_query_experts: int
+    att_query_groups: int
+    cross_att_groups: int
+    cross_att_query_groups: int
 
 
 class RxTAlphaComponentBase(nn.Module, PyTorchModelHubMixin):
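For orientation, the renamed keys drop the `num_` infix, and the two new `cross_att_*` keys let cross-attention settings diverge from the self-attention ones. A minimal sketch of the affected part of a config dict, with illustrative values (the TypedDict's remaining keys are omitted):

attention_config = {
    'self_att_type': 'gqa',
    'cross_att_type': 'mqa',
    'att_experts': 8,             # was: att_num_experts
    'att_query_experts': 4,       # was: att_num_query_experts
    'att_query_groups': 2,        # was: att_num_query_groups
    'cross_att_groups': 2,        # new: cross-attention override
    'cross_att_query_groups': 4,  # new: cross-attention override
}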
@@ -61,9 +63,11 @@ class RxTAlphaComponentBase(nn.Module, PyTorchModelHubMixin):
             moe_top_k: int = 1,
             self_att_type: str = 'gqa',
             cross_att_type: str = 'mqa',
-            att_num_experts: int = None,
-            att_num_query_experts: int = None,
-            att_num_query_groups: int = None,
+            att_experts: int = None,
+            att_query_experts: int = None,
+            att_query_groups: int = None,
+            cross_att_groups: int = None,
+            cross_att_query_groups: int = None,
             **kwargs
     ):
         super(RxTAlphaComponentBase, self).__init__(**kwargs)
@@ -86,20 +90,20 @@ class RxTAlphaComponentBase(nn.Module, PyTorchModelHubMixin):
         else:
             att_init = lambda: init_experimental_attention(embed_dim, att_heads, self_att_type, att_groups, rope=rope,
                                                            use_flash_attention=use_flash_attention, dropout=att_dropout,
-                                                           max_seq_len=seq_len, is_causal=is_causal, num_experts=att_num_experts,
-                                                           num_query_experts=att_num_query_experts,
-                                                           num_query_groups=att_num_query_groups)
+                                                           max_seq_len=seq_len, is_causal=is_causal, num_experts=att_experts,
+                                                           num_query_experts=att_query_experts,
+                                                           num_query_groups=att_query_groups)
 
         if cross_att_type in ['mha', 'gqa', 'mqa']:
             cross_att_init = lambda: init_attention(embed_dim, att_heads, cross_att_type, att_groups, rope=rope,
                                                     use_flash_attention=use_flash_attention, dropout=att_dropout,
                                                     max_seq_len=seq_len, is_causal=is_causal)
         else:
-            cross_att_init = lambda: init_experimental_attention(embed_dim, att_heads, cross_att_type, att_groups, rope=rope,
+            cross_att_init = lambda: init_experimental_attention(embed_dim, att_heads, cross_att_type, cross_att_groups or att_groups, rope=rope,
                                                                  use_flash_attention=use_flash_attention, dropout=att_dropout,
-                                                                 max_seq_len=seq_len, is_causal=is_causal, num_experts=att_num_experts,
-                                                                 num_query_experts=att_num_query_experts,
-                                                                 num_query_groups=att_num_query_groups)
+                                                                 max_seq_len=seq_len, is_causal=is_causal, num_experts=att_experts,
+                                                                 num_query_experts=att_query_experts,
+                                                                 num_query_groups=cross_att_query_groups or att_query_groups)
 
         layers = nn.ModuleList([
             ReactiveTransformerLayer(
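Beyond the renames, the behavioural change in this hunk is the `or` fallback: the experimental cross-attention branch now prefers `cross_att_groups`/`cross_att_query_groups` and falls back to the self-attention values when they are left as None. A standalone sketch of that logic (the helper name is hypothetical, not part of the package):

from typing import Optional

def resolve_cross_att_groups(
        att_groups: int,
        att_query_groups: int,
        cross_att_groups: Optional[int] = None,
        cross_att_query_groups: Optional[int] = None,
) -> tuple[int, int]:
    # Mirrors `cross_att_groups or att_groups` above. Note the check is
    # truthiness, not `is None`, so an explicit 0 also triggers the fallback.
    return (cross_att_groups or att_groups,
            cross_att_query_groups or att_query_groups)

assert resolve_cross_att_groups(4, 8) == (4, 8)          # both fall back
assert resolve_cross_att_groups(4, 8, 2, 16) == (2, 16)  # explicit overrides win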
rxnn/training/dataset.py CHANGED
@@ -14,6 +14,8 @@ class BaseDataset(Dataset):
             tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
             max_seq_len: int = 1024,
             hf_field: str = 'text',
+            cache_tokenized: bool = False,
+            cache_remove_text: bool = False,
             *args,
             **kwargs
     ):
@@ -22,27 +24,56 @@
         self.max_seq_len = max_seq_len
         self.texts = texts
         self.hf_field = hf_field
+        self.is_pre_tokenized = False
+        self.cache_tokenized = cache_tokenized
+        self.cache_remove_text = cache_remove_text
+        self.inputs = [] if self.cache_tokenized else None
 
     def get_tokenized_text(self, idx: int):
-        if isinstance(self.texts, list):
-            text = self.texts[idx]
+        if self.is_pre_tokenized:
+            return self.inputs[idx]
         else:
-            text = self.texts[idx][self.hf_field]
-
-        inputs = self.tokenizer(
-            text,
-            max_length=self.max_seq_len,
-            truncation=True,
-            padding='max_length',
-            return_tensors='pt',
-            return_attention_mask=True
-        )
-        if not (inputs['input_ids'][0] < self.tokenizer.vocab_size).all():
-            inputs['input_ids'][0][(inputs['input_ids'][0] >= self.tokenizer.vocab_size)] = self.tokenizer.unk_token_id
-        if not (inputs['input_ids'][0] >= 0).all():
-            inputs['input_ids'][0][inputs['input_ids'][0] < 0] = self.tokenizer.unk_token_id
-
-        return inputs
+            if isinstance(self.texts, list):
+                text = self.texts[idx]
+            else:
+                text = self.texts[idx][self.hf_field]
+
+            inputs = self.tokenizer(
+                text,
+                max_length=self.max_seq_len,
+                truncation=True,
+                padding='max_length',
+                return_tensors='pt',
+                return_attention_mask=True
+            )
+            if not (inputs['input_ids'][0] < self.tokenizer.vocab_size).all():
+                inputs['input_ids'][0][(inputs['input_ids'][0] >= self.tokenizer.vocab_size)] = self.tokenizer.unk_token_id
+            if not (inputs['input_ids'][0] >= 0).all():
+                inputs['input_ids'][0][inputs['input_ids'][0] < 0] = self.tokenizer.unk_token_id
+
+            if self.cache_tokenized:
+                self.inputs.append(inputs)
+                if len(self.inputs) == len(self.texts):
+                    self.is_pre_tokenized = True
+                    if self.cache_remove_text:
+                        del self.texts
+                        self.texts = None
+
+            return inputs
+
+    def get_subset(self, size: float, from_start: bool = False, use_hf_select: bool = False, **kwargs) -> "BaseDataset":
+        split_point = int(len(self.texts) * ((1 - size) if not from_start else size))
+        subset = self.texts.select(range(split_point, len(self.texts))) if use_hf_select and not isinstance(self.texts, list) else self.texts[:split_point]
+        self.texts = self.texts.select(range(split_point)) if use_hf_select and not isinstance(self.texts, list) else self.texts[split_point:]
+        return self.__class__(subset, self.tokenizer, self.max_seq_len, self.hf_field, **kwargs)
+
+    def pre_tokenize(self, remove_texts: bool = True):
+        if not self.is_pre_tokenized:
+            self.inputs = list(map(lambda idx: self.get_tokenized_text(idx), range(len(self.texts))))
+            self.is_pre_tokenized = True
+            if remove_texts:
+                del self.texts
+                self.texts = None
 
     @classmethod
     def from_hf_hub(
@@ -132,6 +163,7 @@ class BaseDataset(Dataset):
         return cls(hf_dataset, tokenizer, max_seq_len=max_seq_len, hf_field=target_field, **kwargs)
 
 
+
 class JointLMDataset(BaseDataset):
     def __init__(
             self,
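Taken together, the dataset changes add an opt-in token cache (`cache_tokenized`, filled lazily by `get_tokenized_text`), an eager `pre_tokenize`, and an in-place `get_subset` split. A hedged usage sketch follows; the exact `from_hf_hub` signature is not visible in this diff, so the positional dataset id and the kwarg forwarding are assumptions based on its `return cls(...)` line, and the dataset/tokenizer ids are purely illustrative:

from transformers import AutoTokenizer
from rxnn.training.dataset import JointLMDataset

tokenizer = AutoTokenizer.from_pretrained('gpt2')  # illustrative tokenizer

dataset = JointLMDataset.from_hf_hub(
    'wikitext',              # illustrative dataset id
    tokenizer=tokenizer,
    max_seq_len=1024,
    cache_tokenized=True,    # cache encodings as items are first read
    cache_remove_text=True,  # drop raw texts once the cache is complete
)

# Split off a held-out set; per the implementation above, the returned
# subset is the tail when use_hf_select=True (the parent keeps the head)
# and the head otherwise.
valid_dataset = dataset.get_subset(0.1, use_hf_select=True)

# The lazy cache appends in visit order, so with a shuffled DataLoader the
# cached order would not match indices; tokenizing up front avoids that.
dataset.pre_tokenize(remove_texts=True)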
{rxnn-0.1.63.dist-info → rxnn-0.1.65.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.63
+Version: 0.1.65
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
{rxnn-0.1.63.dist-info → rxnn-0.1.65.dist-info}/RECORD CHANGED
@@ -7,12 +7,12 @@ rxnn/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rxnn/memory/norm.py,sha256=Ofl8Q5NYEF9GQeO0bhM43tkTW91J0y6TSvTAOYMgloM,6278
 rxnn/memory/stm.py,sha256=EsD8slSP4_9dLuq6aFPDmuFe8PWilxh90so5Z3nm-ig,2057
 rxnn/rxt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rxnn/rxt/models.py,sha256=87KBLbZB7V3NXW_uO2qAQyrPjf2gA2WJrNIFe-e4jdU,8565
+rxnn/rxt/models.py,sha256=9xJfb1rH7-QVO6PRsvUcbhskb1K7JTcE2ChwR4qT4EY,8711
 rxnn/training/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rxnn/training/base.py,sha256=xPMA2Bg9-oUZvSZg67ls2p7Gk9pZ9IHUiIJwUzSe2K8,11766
 rxnn/training/bml.py,sha256=S1ZaXTybzeJH7uVFamCr4TPl2bLyZ5xmn_lSsjThTiM,19162
 rxnn/training/callbacks.py,sha256=_YfMKY_eFdc-tubhO9nYH2PXDZDQwlSI74FVOoCXpQg,22108
-rxnn/training/dataset.py,sha256=JQuWSUdT5AnsrG6M_EsewoU6uroVHhg4K715nbtDx8A,9643
+rxnn/training/dataset.py,sha256=fKTNtDOAMSuRPbSl0yy0-k_xQ5o-AF6pqwKw9tC-7Mw,11328
 rxnn/training/scheduler.py,sha256=ow6oALzWjWQmHSpcJEjv6tg4g4CDMvr73TypxfcefMc,712
 rxnn/training/tokenizer.py,sha256=umaLByMBx_NMrQElA45HLm9gkuzyKWDTFaKVd-CjXl0,8344
 rxnn/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -25,7 +25,7 @@ rxnn/transformers/moe.py,sha256=j6jEx6Ip0zttlUZKKn82azxo95lkLZs-H2GLSMD88hY,5859
 rxnn/transformers/positional.py,sha256=2l38RS0Dini3f6Z3LUHr3XwWzg1UK7fO2C6wazWDAYU,4292
 rxnn/transformers/sampler.py,sha256=poWBpxg1iuK5gEJtxHkk5VVfS9V48hs2Olqdhy_Gw8c,6548
 rxnn/utils.py,sha256=d5U8i5ukovgDyqiycc2AoxObTz_eF_bgo2MKvdtJ98s,467
-rxnn-0.1.63.dist-info/LICENSE,sha256=C8coDFIUYuOcke4JLPwTqahQUCyXyGq6WOaigOkx8tY,11275
-rxnn-0.1.63.dist-info/METADATA,sha256=xvWmB2ulLIlSHusanls5oj3zwgmoJ_ykk_G8inNLhzY,16579
-rxnn-0.1.63.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
-rxnn-0.1.63.dist-info/RECORD,,
+rxnn-0.1.65.dist-info/LICENSE,sha256=C8coDFIUYuOcke4JLPwTqahQUCyXyGq6WOaigOkx8tY,11275
+rxnn-0.1.65.dist-info/METADATA,sha256=wFv7U3aEMscWvxERWl2x-qHO2aBc5LPF79uV9RUzbyQ,16579
+rxnn-0.1.65.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+rxnn-0.1.65.dist-info/RECORD,,