optimum-rbln 0.1.1__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (37)
  1. optimum/rbln/__init__.py +9 -0
  2. optimum/rbln/__version__.py +1 -1
  3. optimum/rbln/diffusers/models/autoencoder_kl.py +16 -98
  4. optimum/rbln/diffusers/models/unet_2d_condition.py +1 -1
  5. optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +9 -11
  6. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +8 -0
  7. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +9 -0
  8. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +3 -0
  9. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +8 -0
  10. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +8 -0
  11. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +9 -0
  12. optimum/rbln/modeling_base.py +175 -103
  13. optimum/rbln/modeling_seq2seq.py +58 -132
  14. optimum/rbln/transformers/__init__.py +4 -0
  15. optimum/rbln/transformers/models/__init__.py +2 -0
  16. optimum/rbln/transformers/models/clip/modeling_clip.py +0 -1
  17. optimum/rbln/transformers/models/dpt/__init__.py +24 -0
  18. optimum/rbln/transformers/models/dpt/modeling_dpt.py +89 -0
  19. optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +24 -33
  20. optimum/rbln/transformers/models/gpt2/modeling_gpt2.py +52 -124
  21. optimum/rbln/transformers/models/llama/llama_architecture.py +62 -33
  22. optimum/rbln/transformers/models/llama/llama_architecture_cb.py +764 -0
  23. optimum/rbln/transformers/models/llama/modeling_llama.py +208 -140
  24. optimum/rbln/transformers/models/midm/__init__.py +32 -0
  25. optimum/rbln/transformers/models/midm/hf_hub_cached/configuration_midm.py +22 -0
  26. optimum/rbln/transformers/models/midm/hf_hub_cached/midm_bitext_tokenization.py +303 -0
  27. optimum/rbln/transformers/models/midm/hf_hub_cached/modeling_midm.py +1473 -0
  28. optimum/rbln/transformers/models/midm/hf_hub_cached/rotary_position_embedding.py +98 -0
  29. optimum/rbln/transformers/models/midm/midm_architecture.py +506 -0
  30. optimum/rbln/transformers/models/midm/modeling_midm.py +390 -0
  31. optimum/rbln/transformers/models/whisper/modeling_whisper.py +53 -123
  32. optimum/rbln/utils/__init__.py +1 -1
  33. optimum/rbln/utils/import_utils.py +46 -0
  34. {optimum_rbln-0.1.1.dist-info → optimum_rbln-0.1.7.dist-info}/METADATA +17 -50
  35. {optimum_rbln-0.1.1.dist-info → optimum_rbln-0.1.7.dist-info}/RECORD +37 -27
  36. {optimum_rbln-0.1.1.dist-info → optimum_rbln-0.1.7.dist-info}/WHEEL +1 -1
  37. {optimum_rbln-0.1.1.dist-info → optimum_rbln-0.1.7.dist-info}/licenses/LICENSE +0 -0
optimum/rbln/transformers/models/midm/hf_hub_cached/midm_bitext_tokenization.py (new file)
@@ -0,0 +1,303 @@
+ # coding=utf-8
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Tokenization class for model Midm_bitext_tonkenizer."""
14
+ import os
+ import re
+ import warnings
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+ from transformers.tokenization_utils import PreTrainedTokenizer
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "midm_bitext_tokenizer.model"}
+
+ PRETRAINED_VOCAB_FILES_MAP = {}
+
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+ class Midm_bitext_Tokenizer(PreTrainedTokenizer):
+     """
+     Construct a Midm bitext tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
+
+     This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+     this superclass for more information regarding those methods.
+
+     Args:
+         vocab_file (`str`):
+             [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
+             contains the vocabulary necessary to instantiate a tokenizer.
+         eos_token (`str`, *optional*, defaults to `"</s>"`):
+             The end of sequence token.
+
+             <Tip>
+
+             When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+             The token used is the `sep_token`.
+
+             </Tip>
+
+         unk_token (`str`, *optional*, defaults to `"<unk>"`):
+             The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+             token instead.
+         pad_token (`str`, *optional*, defaults to `"<pad>"`):
+             The token used for padding, for example when batching sequences of different lengths.
+         extra_ids (`int`, *optional*, defaults to 100):
+             Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
+             accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
+             indexed from the end of the vocabulary up to beginning.
+         additional_special_tokens (`List[str]`, *optional*):
+             Additional special tokens used by the tokenizer.
+         sp_model_kwargs (`dict`, *optional*):
+             Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+             SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+             to set:
+
+             - `enable_sampling`: Enable subword regularization.
+             - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+               - `nbest_size = {0,1}`: No sampling is performed.
+               - `nbest_size > 1`: samples from the nbest_size results.
+               - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+                 using forward-filtering-and-backward-sampling algorithm.
+
+             - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+               BPE-dropout.
+
+     Attributes:
+         sp_model (`SentencePieceProcessor`):
+             The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self,
+         vocab_file,
+         eos_token="</s>",
+         unk_token="<unk>",
+         pad_token="<pad>",
+         extra_ids=100,
+         additional_special_tokens=None,
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         **kwargs,
+     ) -> None:
+         # Add extra_ids to the special token list
+         if extra_ids > 0 and additional_special_tokens is None:
+             additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
+         elif extra_ids > 0 and additional_special_tokens is not None:
+             # Check that we have the right number of extra_id special tokens
+             extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
+             if extra_tokens != extra_ids:
+                 raise ValueError(
+ f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to Midm_bitext_Tonkenizer. "
112
+ "In this case the additional_special_tokens must include the extra_ids tokens"
113
+ )
114
+
115
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
116
+
117
+ # custom special tokens
118
+ # convert \n, \t in input text -> <[!newline]>, <[!tab]>
119
+ self.newline_token = "<[!newline]>"
120
+ self.tab_token = "<[!tab]>"
121
+
122
+ self.vocab_file = vocab_file
123
+ self._extra_ids = extra_ids
124
+
125
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
126
+ self.sp_model.Load(vocab_file)
127
+ super().__init__(
128
+ eos_token=eos_token,
129
+ unk_token=unk_token,
130
+ pad_token=pad_token,
131
+ extra_ids=extra_ids,
132
+ additional_special_tokens=additional_special_tokens,
133
+ sp_model_kwargs=self.sp_model_kwargs,
134
+ **kwargs,
135
+ )
136
+
137
+ @property
138
+ def vocab_size(self):
139
+ return self.sp_model.get_piece_size() + self._extra_ids
140
+
141
+ def get_vocab(self):
142
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
143
+ vocab.update(self.added_tokens_encoder)
144
+ return vocab
145
+
146
+ def get_special_tokens_mask(
147
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
148
+ ) -> List[int]:
149
+ """
150
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
151
+ special tokens using the tokenizer `prepare_for_model` method.
152
+
153
+ Args:
154
+ token_ids_0 (`List[int]`):
155
+ List of IDs.
156
+ token_ids_1 (`List[int]`, *optional*):
157
+ Optional second list of IDs for sequence pairs.
158
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
159
+ Whether or not the token list is already formatted with special tokens for the model.
160
+
161
+ Returns:
162
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
163
+ """
164
+ if already_has_special_tokens:
165
+ return super().get_special_tokens_mask(
166
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
167
+ )
168
+
169
+ # normal case: some special tokens
170
+ if token_ids_1 is None:
171
+ return ([0] * len(token_ids_0)) + [1]
172
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
173
+
174
+ def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
175
+ """Do not add eos again if user already added it."""
176
+ if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
177
+ warnings.warn(
178
+ f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added."
179
+ )
180
+ return token_ids
181
+ else:
182
+ return token_ids + [self.eos_token_id]
183
+
184
+ def create_token_type_ids_from_sequences(
185
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
186
+ ) -> List[int]:
187
+ """
188
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Midm does not make
189
+ use of token type ids, therefore a list of zeros is returned.
190
+
191
+ Args:
192
+ token_ids_0 (`List[int]`):
193
+ List of IDs.
194
+ token_ids_1 (`List[int]`, *optional*):
195
+ Optional second list of IDs for sequence pairs.
196
+
197
+ Returns:
198
+ `List[int]`: List of zeros.
199
+ """
200
+ eos = [self.eos_token_id]
201
+
202
+ if token_ids_1 is None:
203
+ return len(token_ids_0 + eos) * [0]
204
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
205
+
206
+ def build_inputs_with_special_tokens(
207
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
208
+ ) -> List[int]:
209
+ """
210
+         Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+         adding special tokens. A sequence has the following format:
+
+         - single sequence: `X </s>`
+         - pair of sequences: `A </s> B </s>`
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs to which the special tokens will be added.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+         """
+         token_ids_0 = self._add_eos_if_not_present(token_ids_0)
+         if token_ids_1 is None:
+             return token_ids_0
+         else:
+             token_ids_1 = self._add_eos_if_not_present(token_ids_1)
+             return token_ids_0 + token_ids_1
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state["sp_model"] = None
+         return state
+
+     def __setstate__(self, d):
+         self.__dict__ = d
+
+         # for backward compatibility
+         if not hasattr(self, "sp_model_kwargs"):
+             self.sp_model_kwargs = {}
+
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(self.vocab_file)
+
+     def _tokenize(self, text: str) -> List[str]:
+         """Take as input a string and return a list of strings (tokens) for words/sub-words"""
+         text = text.replace("\n", self.newline_token)
+         text = text.replace("\t", self.tab_token)
+
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) in an id using the vocab."""
+         if token.startswith("<extra_id_"):
+             match = re.match(r"<extra_id_(\d+)>", token)
+             num = int(match.group(1))
+             return self.vocab_size - num - 1
+         return self.sp_model.piece_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) in a token (str) using the vocab."""
+         if index < self.sp_model.get_piece_size():
+             token = self.sp_model.IdToPiece(index)
+         else:
+             token = f"<extra_id_{self.vocab_size - 1 - index}>"
+         return token
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (string) in a single string."""
+         current_sub_tokens = []
+         out_string = ""
+         for token in tokens:
+             # make sure that special tokens are not decoded using sentencepiece model
+             if token in self.all_special_tokens:
+                 out_string += self.sp_model.decode_pieces(current_sub_tokens) + token + " "
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+         out_string += self.sp_model.decode_pieces(current_sub_tokens)
+
+         out_string = out_string.replace(self.newline_token, "\n")
+         out_string = out_string.replace(self.tab_token, "\t")
+
+         return out_string.strip()
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
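
For orientation, below is a minimal usage sketch of the tokenizer added in this file. It is not taken from the package documentation: the import path simply mirrors the file location introduced in this diff, and the vocabulary file path is a hypothetical local SentencePiece model.

# Minimal usage sketch (assumptions: import path mirrors the new file location,
# "./midm_bitext_tokenizer.model" is a placeholder for a locally available
# SentencePiece model trained for Mi:dm).
from optimum.rbln.transformers.models.midm.hf_hub_cached.midm_bitext_tokenization import (
    Midm_bitext_Tokenizer,
)

tokenizer = Midm_bitext_Tokenizer(vocab_file="./midm_bitext_tokenizer.model")

# _tokenize() first rewrites "\n" and "\t" into the custom <[!newline]> / <[!tab]>
# pieces, then delegates to SentencePiece.
tokens = tokenizer.tokenize("first line\n\tsecond line")

# build_inputs_with_special_tokens() appends the eos token, so a single sequence
# becomes `X </s>`.
input_ids = tokenizer.build_inputs_with_special_tokens(tokenizer.convert_tokens_to_ids(tokens))

# Sentinel tokens occupy the top of the vocabulary: <extra_id_0> maps to
# vocab_size - 1, <extra_id_1> to vocab_size - 2, and so on.
print(tokenizer.convert_tokens_to_ids("<extra_id_0>"), tokenizer.vocab_size - 1)

# convert_tokens_to_string() decodes the pieces and restores "\n" and "\t".
print(tokenizer.convert_tokens_to_string(tokens))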