rxnn 0.1.75__py3-none-any.whl → 0.1.77__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rxnn/rxt/models.py CHANGED
@@ -97,13 +97,13 @@ class RxTAlphaComponentBase(nn.Module, PyTorchModelHubMixin):
         if cross_att_type in ['mha', 'gqa', 'mqa']:
             cross_att_init = lambda: init_attention(embed_dim, att_heads, cross_att_type, att_groups, rope=rope,
                                                     use_flash_attention=use_flash_attention, dropout=att_dropout,
-                                                    max_seq_len=seq_len, is_causal=is_causal)
+                                                    max_seq_len=seq_len, is_causal=is_causal, rope_only_for_query=True)
         else:
             cross_att_init = lambda: init_experimental_attention(embed_dim, att_heads, cross_att_type, cross_att_groups or att_groups, rope=rope,
                                                                  use_flash_attention=use_flash_attention, dropout=att_dropout,
                                                                  max_seq_len=seq_len, is_causal=is_causal, num_experts=att_experts,
                                                                  num_query_experts=att_query_experts,
-                                                                 num_query_groups=cross_att_query_groups or att_query_groups)
+                                                                 num_query_groups=cross_att_query_groups or att_query_groups, rope_only_for_query=True)
 
         layers = nn.ModuleList([
             ReactiveTransformerLayer(
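Both cross-attention initializers now pass rope_only_for_query=True. In memory cross-attention the queries come from the processed token sequence, while the keys and values come from short-term memory slots that carry no sequence positions, so rotating only the queries is the plausible intent of this flag. Below is a minimal sketch of how such a flag can gate rotary embeddings; the rope callable and module layout are illustrative assumptions, not the rxnn implementation.

# Minimal sketch (not the rxnn implementation) of gating RoPE with a
# rope_only_for_query flag in cross-attention.
import torch
import torch.nn as nn

class CrossAttentionSketch(nn.Module):
    def __init__(self, embed_dim: int, rope_only_for_query: bool = False):
        super().__init__()
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.rope_only_for_query = rope_only_for_query

    def forward(self, x: torch.Tensor, memory: torch.Tensor, rope):
        # `rope` is assumed to be a callable applying rotary embeddings by position.
        q = rope(self.q_proj(x))   # queries keep their real sequence positions
        k = self.k_proj(memory)    # keys come from memory slots
        if not self.rope_only_for_query:
            k = rope(k)            # skipped when slot positions carry no meaning
        return q, k                # scores/softmax/output projection omitted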
rxnn/training/dataset.py CHANGED
@@ -16,6 +16,8 @@ class BaseDataset(Dataset):
             hf_field: str = 'text',
             cache_tokenized: bool = False,
             cache_remove_text: bool = True,
+            tokenize_in_background: bool = False,
+            batch_size: int = 1,
             *args,
             **kwargs
     ):
@@ -28,6 +30,17 @@ class BaseDataset(Dataset):
         self.cache_tokenized = cache_tokenized
         self.cache_remove_text = cache_remove_text
         self.inputs = []
+        self.is_txt_list = isinstance(self.texts, list)
+        self.tokenize_in_background = tokenize_in_background
+        self.bg_next = []
+        self.bg_queue = None
+        self.batch_size = batch_size
+        self.last_idx = 0
+        if tokenize_in_background:
+            for i in range(self.batch_size):
+                self.bg_next.append(self.get_tokenized_text(i))
+            self.last_idx = self.batch_size - 1
+
 
     def __len__(self):
         return len(self.texts if not self.is_pre_tokenized else self.inputs)
@@ -35,10 +48,50 @@ class BaseDataset(Dataset):
     def get_tokenized_text(self, idx: int, txt: str = None):
         if self.is_pre_tokenized:
             return self.inputs[idx]
+        elif self.tokenize_in_background:
+            if idx == self.last_idx - self.batch_size:
+                if self.bg_queue is not None:
+                    self.bg_next = self.bg_queue
+                    self.bg_queue = None
+                # TODO: schedule tokenizing a batch in background
+            elif idx == self.last_idx:
+                item = self.bg_next[idx]
+                self.bg_next = []
+                return item
+
+            if idx <= self.last_idx:
+                if self.bg_queue is not None:
+                    self.bg_next = self.bg_queue
+                    self.bg_queue = None
+
+                new_idx = idx - (self.last_idx - self.batch_size)
+                if new_idx in self.bg_next:
+                    return self.bg_next[new_idx]
+            else:
+                if self.is_txt_list:
+                    text = self.texts[idx]
+                else:
+                    text = self.texts[idx][self.hf_field]
+
+                inputs = self.tokenizer(
+                    text,
+                    max_length=self.max_seq_len,
+                    truncation=True,
+                    padding='max_length',
+                    return_tensors='pt',
+                    return_attention_mask=True
+                )
+                if not (inputs['input_ids'][0] < self.tokenizer.vocab_size).all():
+                    inputs['input_ids'][0][
+                        (inputs['input_ids'][0] >= self.tokenizer.vocab_size)] = self.tokenizer.unk_token_id
+                if not (inputs['input_ids'][0] >= 0).all():
+                    inputs['input_ids'][0][inputs['input_ids'][0] < 0] = self.tokenizer.unk_token_id
+
+                return inputs
         else:
-            if txt:
+            if txt is not None:
                 text = txt
-            elif isinstance(self.texts, list):
+            elif self.is_txt_list:
                 text = self.texts[idx]
             else:
                 text = self.texts[idx][self.hf_field]
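The new tokenize_in_background branch keeps a prefetched batch in bg_next and swaps in a staging buffer bg_queue once it is ready; the actual background scheduling is still marked TODO, so indices past the prefetched window fall through to on-the-fly tokenization. That fallback also clamps any token id outside [0, vocab_size) to unk_token_id before returning. A standalone sketch of that guard follows; the concrete values are illustrative.

# Standalone sketch of the out-of-vocabulary guard used in the fallback path:
# any token id outside [0, vocab_size) is replaced with unk_token_id.
import torch

def clamp_to_vocab(input_ids: torch.Tensor, vocab_size: int, unk_token_id: int) -> torch.Tensor:
    input_ids = input_ids.clone()
    input_ids[input_ids >= vocab_size] = unk_token_id  # ids past the embedding table
    input_ids[input_ids < 0] = unk_token_id            # negative/sentinel ids
    return input_ids

ids = torch.tensor([5, 12, 50_000, -1])
print(clamp_to_vocab(ids, vocab_size=32_000, unk_token_id=0))  # tensor([ 5, 12,  0,  0])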
@@ -92,22 +145,14 @@ class BaseDataset(Dataset):
         """
         if not self.is_pre_tokenized:
             num_texts = len(self.texts)
-            is_txt_list = isinstance(self.texts, list)
-            if is_txt_list or map_hf_ds_to_list:
-                txts = self.texts if is_txt_list else self.texts.to_list()
-                del self.texts
-                self.texts = None
-                for index in range(num_texts):
-                    item = txts.pop() if is_txt_list else txts.pop()[self.hf_field]
-                    self.inputs.append(self.get_tokenized_text(index, txt=item))
-                    if verbose and index % log_interval == 0:
-                        print(f'Processed {index + 1}/{num_texts}')
-            else:
-                for index in range(num_texts):
-                    self.inputs.append(self.get_tokenized_text(index))
-                    del self.texts[index]
-                    if verbose and index % log_interval == 0:
-                        print(f'Processed {index + 1}/{num_texts}')
+            txts = self.texts if self.is_txt_list else self.texts.to_list()
+            del self.texts
+            self.texts = None
+            for index in range(num_texts):
+                item = txts.pop() if self.is_txt_list else txts.pop()[self.hf_field]
+                self.inputs.append(self.get_tokenized_text(index, txt=item))
+                if verbose and index % log_interval == 0:
+                    print(f'Processed {index + 1}/{num_texts}')
         self.is_pre_tokenized = True
 
 
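pre_tokenize now always materializes the source into a plain list (via to_list() for HuggingFace datasets) and pops items as it tokenizes, so raw texts can be freed as it goes; the old map_hf_ds_to_list branch is replaced by the cached self.is_txt_list flag. A hypothetical usage sketch follows; the constructor arguments other than the two new ones are inferred from the attributes visible in this diff, not from the full rxnn API.

# Hypothetical usage sketch: argument names/order besides tokenize_in_background
# and batch_size are assumptions based on fields visible in this diff.
from transformers import AutoTokenizer
from rxnn.training.dataset import BaseDataset

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

dataset = BaseDataset(
    ['first example text', 'second example text'],  # plain list or an HF dataset split
    tokenizer,
    max_seq_len=256,
    hf_field='text',
    tokenize_in_background=True,  # eagerly tokenizes the first batch in __init__
    batch_size=2,                 # size of the prefetched batch
)
dataset.pre_tokenize(verbose=True, log_interval=1)  # one-off eager tokenization
print(len(dataset))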
 
rxnn-0.1.75.dist-info/METADATA → rxnn-0.1.77.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: rxnn
-Version: 0.1.75
+Version: 0.1.77
 Summary: RxNN: Reactive Neural Networks Platform
 License: Apache-2.0
 Keywords: deep-learning,ai,machine-learning
rxnn-0.1.75.dist-info/RECORD → rxnn-0.1.77.dist-info/RECORD CHANGED
@@ -7,12 +7,12 @@ rxnn/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rxnn/memory/norm.py,sha256=Ofl8Q5NYEF9GQeO0bhM43tkTW91J0y6TSvTAOYMgloM,6278
 rxnn/memory/stm.py,sha256=EsD8slSP4_9dLuq6aFPDmuFe8PWilxh90so5Z3nm-ig,2057
 rxnn/rxt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-rxnn/rxt/models.py,sha256=9xJfb1rH7-QVO6PRsvUcbhskb1K7JTcE2ChwR4qT4EY,8711
+rxnn/rxt/models.py,sha256=iUlSvdXrD1NVzZFmdC55qp4_3xoJj31FC40BGgYlf4Q,8763
 rxnn/training/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rxnn/training/base.py,sha256=xPMA2Bg9-oUZvSZg67ls2p7Gk9pZ9IHUiIJwUzSe2K8,11766
 rxnn/training/bml.py,sha256=S1ZaXTybzeJH7uVFamCr4TPl2bLyZ5xmn_lSsjThTiM,19162
 rxnn/training/callbacks.py,sha256=_YfMKY_eFdc-tubhO9nYH2PXDZDQwlSI74FVOoCXpQg,22108
-rxnn/training/dataset.py,sha256=TbtgS5_DV_tfpUI-azBcsZUuPYvlXlBQSmtThsF4xpc,12925
+rxnn/training/dataset.py,sha256=Nqt1uRi3i89R7blS2vfuZ-fXY-OTMPTPlKjlffentOA,14761
 rxnn/training/scheduler.py,sha256=ow6oALzWjWQmHSpcJEjv6tg4g4CDMvr73TypxfcefMc,712
 rxnn/training/tokenizer.py,sha256=umaLByMBx_NMrQElA45HLm9gkuzyKWDTFaKVd-CjXl0,8344
 rxnn/transformers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -25,7 +25,7 @@ rxnn/transformers/moe.py,sha256=j6jEx6Ip0zttlUZKKn82azxo95lkLZs-H2GLSMD88hY,5859
 rxnn/transformers/positional.py,sha256=2l38RS0Dini3f6Z3LUHr3XwWzg1UK7fO2C6wazWDAYU,4292
 rxnn/transformers/sampler.py,sha256=poWBpxg1iuK5gEJtxHkk5VVfS9V48hs2Olqdhy_Gw8c,6548
 rxnn/utils.py,sha256=d5U8i5ukovgDyqiycc2AoxObTz_eF_bgo2MKvdtJ98s,467
-rxnn-0.1.75.dist-info/LICENSE,sha256=C8coDFIUYuOcke4JLPwTqahQUCyXyGq6WOaigOkx8tY,11275
-rxnn-0.1.75.dist-info/METADATA,sha256=Mj2ATQOdwPfT8zslHqMomPw8-jxrUhf1-AarmHfnov4,16579
-rxnn-0.1.75.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
-rxnn-0.1.75.dist-info/RECORD,,
+rxnn-0.1.77.dist-info/LICENSE,sha256=C8coDFIUYuOcke4JLPwTqahQUCyXyGq6WOaigOkx8tY,11275
+rxnn-0.1.77.dist-info/METADATA,sha256=5E-PtsxCPqzb9AcD0Y9ym2SGUva4pDRS_kPSZW0PCGg,16579
+rxnn-0.1.77.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+rxnn-0.1.77.dist-info/RECORD,,
rxnn-0.1.75.dist-info/LICENSE → rxnn-0.1.77.dist-info/LICENSE: File without changes
rxnn-0.1.75.dist-info/WHEEL → rxnn-0.1.77.dist-info/WHEEL: File without changes