optimum-rbln 0.1.11__py3-none-any.whl → 0.1.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. optimum/rbln/__init__.py +10 -7
  2. optimum/rbln/__version__.py +1 -1
  3. optimum/rbln/diffusers/models/autoencoder_kl.py +0 -2
  4. optimum/rbln/diffusers/models/controlnet.py +0 -6
  5. optimum/rbln/diffusers/models/unet_2d_condition.py +0 -3
  6. optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +4 -0
  7. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +18 -20
  8. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +18 -20
  9. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +19 -34
  10. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +20 -35
  11. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +12 -13
  12. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +12 -14
  13. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +13 -14
  14. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +13 -14
  15. optimum/rbln/modeling_alias.py +4 -9
  16. optimum/rbln/modeling_base.py +105 -139
  17. optimum/rbln/modeling_config.py +51 -0
  18. optimum/rbln/transformers/__init__.py +8 -0
  19. optimum/rbln/transformers/models/__init__.py +4 -1
  20. optimum/rbln/transformers/models/auto/modeling_auto.py +1 -0
  21. optimum/rbln/transformers/models/bart/__init__.py +1 -1
  22. optimum/rbln/transformers/models/bart/bart_architecture.py +18 -12
  23. optimum/rbln/transformers/models/bart/modeling_bart.py +25 -6
  24. optimum/rbln/transformers/models/bert/modeling_bert.py +1 -2
  25. optimum/rbln/transformers/models/clip/modeling_clip.py +0 -1
  26. optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +172 -100
  27. optimum/rbln/transformers/models/dpt/modeling_dpt.py +0 -1
  28. optimum/rbln/transformers/models/exaone/__init__.py +32 -0
  29. optimum/rbln/transformers/models/exaone/exaone_architecture.py +72 -0
  30. optimum/rbln/transformers/models/exaone/hf_hub_cached/configuration_exaone.py +181 -0
  31. optimum/rbln/transformers/models/exaone/hf_hub_cached/modeling_exaone.py +1725 -0
  32. optimum/rbln/transformers/models/exaone/modeling_exaone.py +78 -0
  33. optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +148 -152
  34. optimum/rbln/transformers/models/midm/modeling_midm.py +5 -0
  35. optimum/rbln/transformers/models/qwen2/__init__.py +24 -0
  36. optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +67 -0
  37. optimum/rbln/transformers/models/qwen2/qwen2_architecture.py +29 -0
  38. optimum/rbln/transformers/models/seq2seq/__init__.py +24 -0
  39. optimum/rbln/{modeling_seq2seq.py → transformers/models/seq2seq/modeling_seq2seq.py} +107 -166
  40. optimum/rbln/transformers/models/t5/__init__.py +1 -0
  41. optimum/rbln/transformers/models/t5/modeling_t5.py +55 -0
  42. optimum/rbln/transformers/models/t5/t5_architecture.py +46 -32
  43. optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +0 -1
  44. optimum/rbln/transformers/models/whisper/modeling_whisper.py +37 -12
  45. optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +1 -2
  46. optimum/rbln/utils/import_utils.py +14 -0
  47. optimum/rbln/utils/logging.py +1 -1
  48. optimum/rbln/utils/runtime_utils.py +1 -1
  49. optimum/rbln/utils/timer_utils.py +26 -2
  50. {optimum_rbln-0.1.11.dist-info → optimum_rbln-0.1.12.dist-info}/METADATA +4 -3
  51. {optimum_rbln-0.1.11.dist-info → optimum_rbln-0.1.12.dist-info}/RECORD +54 -44
  52. {optimum_rbln-0.1.11.dist-info → optimum_rbln-0.1.12.dist-info}/WHEEL +1 -1
  53. {optimum_rbln-0.1.11.dist-info → optimum_rbln-0.1.12.dist-info}/entry_points.txt +0 -0
  54. {optimum_rbln-0.1.11.dist-info → optimum_rbln-0.1.12.dist-info}/licenses/LICENSE +0 -0
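Beyond housekeeping, the headline changes in 0.1.12 are new decoder-only model families (EXAONE and Qwen2), a reorganization of the seq2seq/T5 code, and a substantial rework of modeling_base.py and modeling_decoderonly.py. Below is a hypothetical loading sketch for one of the new families, assuming the new modules follow the package's existing `RBLN<Arch>ForCausalLM` naming and `from_pretrained(..., export=True)` compile-on-load convention; the class name, checkpoint id, and `rbln_*` keyword names are assumptions, not confirmed by this diff.

```python
from optimum.rbln import RBLNQwen2ForCausalLM  # assumed new export in 0.1.12

# Compile a Hugging Face checkpoint for the RBLN NPU and save the compiled
# artifacts; keyword names follow the rbln_* convention of prior releases.
model = RBLNQwen2ForCausalLM.from_pretrained(
    "Qwen/Qwen2-7B-Instruct",   # hypothetical checkpoint id
    export=True,                # compile from the PyTorch weights
    rbln_max_seq_len=8192,
    rbln_batch_size=1,
)
model.save_pretrained("qwen2-7b-rbln")
```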
optimum/rbln/transformers/models/exaone/exaone_architecture.py (new file)
@@ -0,0 +1,72 @@
+ # Copyright 2024 Rebellions Inc.
+
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at:
+
+ #     http://www.apache.org/licenses/LICENSE-2.0
+
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Portions of this software are licensed under the Apache License,
+ # Version 2.0. See the NOTICE file distributed with this work for
+ # additional information regarding copyright ownership.
+
+ # All other portions of this software, including proprietary code,
+ # are the intellectual property of Rebellions Inc. and may not be
+ # copied, modified, or distributed without prior written permission
+ # from Rebellions Inc.
+
+
+ from ...models.decoderonly import (
+     DecoderOnlyAttention,
+     DecoderOnlyDecoderLayer,
+     DecoderOnlyModel,
+     DecoderOnlyWrapper,
+ )
+
+
+ class ExaoneForCausalLMWrapper(DecoderOnlyWrapper):
+     """A wrapper class for the Exaone model with a language modeling head."""
+
+     def __init__(self, model, max_seq_len):
+         super(DecoderOnlyWrapper, self).__init__()
+         self.config = model.config
+         self.model = self.convert_attribute_name(model.transformer)
+         self.lm_head = model.lm_head
+         self.head_dim = self.config.hidden_size // self.config.num_attention_heads
+         self.max_position_embeddings = (
+             self.config.max_position_embeddings if max_seq_len > self.config.max_position_embeddings else max_seq_len
+         )
+         self.max_seq_len = max_seq_len
+         self.rope_scaling = getattr(self.config, "rope_scaling", None)
+         self.rotary_emb = self._init_rope()
+
+     @staticmethod
+     def convert_attribute_name(model):
+         model.embed_tokens = model.wte
+         model.norm = model.ln_f
+         model.layers = model.h
+
+         for layer in model.layers:
+             layer.input_layernorm = layer.ln_1
+             layer.self_attn = layer.attn.attention
+             layer.post_attention_layernorm = layer.ln_2
+             layer.self_attn.o_proj = layer.self_attn.out_proj
+
+         return model
+
+     def get_forward_dict(self):
+         forward_dict = {}
+         forward_dict.update(
+             {
+                 "wrapper": DecoderOnlyModel.forward,
+                 "model": DecoderOnlyDecoderLayer.forward,
+                 "decoder_layer": DecoderOnlyAttention.forward,
+             }
+         )
+         return forward_dict
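The wrapper above adapts EXAONE's GPT-style module names (`wte`, `ln_f`, `h`, `ln_1`, `attn.attention`, `out_proj`) to the Llama-style attribute names that the shared `DecoderOnly*` classes expect, by aliasing submodules rather than copying them. A minimal sketch of that aliasing idea on stand-in `torch.nn` modules (`TinyBlock` and `TinyTransformer` are hypothetical and not part of the package):

```python
import torch.nn as nn

class TinyBlock(nn.Module):
    """GPT-style naming, mirroring EXAONE's original layer layout."""
    def __init__(self, hidden=8):
        super().__init__()
        self.ln_1 = nn.LayerNorm(hidden)
        self.attn = nn.Module()
        self.attn.attention = nn.Module()
        self.attn.attention.out_proj = nn.Linear(hidden, hidden)
        self.ln_2 = nn.LayerNorm(hidden)

class TinyTransformer(nn.Module):
    """GPT-style container, mirroring model.transformer in EXAONE."""
    def __init__(self, hidden=8, vocab=16):
        super().__init__()
        self.wte = nn.Embedding(vocab, hidden)
        self.h = nn.ModuleList([TinyBlock(hidden)])
        self.ln_f = nn.LayerNorm(hidden)

model = TinyTransformer()

# The same renaming convert_attribute_name performs: expose Llama-style
# attribute names that point at the existing GPT-style submodules.
model.embed_tokens = model.wte
model.norm = model.ln_f
model.layers = model.h
for layer in model.layers:
    layer.input_layernorm = layer.ln_1
    layer.self_attn = layer.attn.attention
    layer.post_attention_layernorm = layer.ln_2
    layer.self_attn.o_proj = layer.self_attn.out_proj

# Both names now resolve to the very same module object.
assert model.layers[0].self_attn.o_proj is model.h[0].attn.attention.out_proj
```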
optimum/rbln/transformers/models/exaone/hf_hub_cached/configuration_exaone.py (new file)
@@ -0,0 +1,181 @@
+ # coding=utf-8
+ # Copyright 2021 The LG AI Research EXAONE Lab. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """EXAONE model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ EXAONE_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ class ExaoneConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a :class:`~transformers.ExaoneModel`. It is used to
+     instantiate an EXAONE model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the EXAONE model.
+     Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
+     outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
+     Args:
+         vocab_size (:obj:`int`, `optional`, defaults to 102400):
+             Vocabulary size of the EXAONE model. Defines the number of different tokens that can be represented by the
+             :obj:`inputs_ids` passed when calling :class:`~transformers.ExaoneModel`. Vocabulary size of the model.
+             Defines the different tokens that can be represented by the `inputs_ids` passed to the forward method of
+             :class:`~transformers.EXAONEModel`.
+         max_position_embeddings (:obj:`int`, `optional`, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         hidden_size (:obj:`int`, `optional`, defaults to 2048):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_layers (:obj:`int`, `optional`, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (:obj:`int`, `optional`, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (:obj:`int`, `optional`):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details checkout [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         intermediate_size (:obj:`int`, `optional`, defaults to `hidden_size * 4`):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         rope_theta (:obj:`float`, `optional`, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (:obj:`Dict`, `optional`):
+             Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
+             and you expect the model to work on longer `max_position_embeddings`, we recommend you update this value
+             accordingly.
+             Expected contents:
+                 `rope_type` (:obj:`str`):
+                     The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
+                     'llama3'], with 'default' being the original RoPE implementation.
+                 `factor` (:obj:`float`, `optional`):
+                     Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
+                     most scaling types, a `factor` of x will enable the model to handle sequences of length x *
+                     original maximum pre-trained length.
+                 `original_max_position_embeddings` (:obj:`int`, `optional`):
+                     Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
+                     pretraining.
+                 `attention_factor` (:obj:`float`, `optional`):
+                     Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
+                     computation. If unspecified, it defaults to the value recommended by the implementation, using the
+                     `factor` field to infer the suggested value.
+                 `beta_fast` (:obj:`float`, `optional`):
+                     Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 32.
+                 `beta_slow` (:obj:`float`, `optional`):
+                     Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 1.
+                 `short_factor` (:obj:`List[float]`, `optional`):
+                     Only used with 'longrope'. The scaling factor to be applied to short contexts (<
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                     size divided by the number of attention heads divided by 2.
+                 `long_factor` (:obj:`List[float]`, `optional`):
+                     Only used with 'longrope'. The scaling factor to be applied to long contexts (>
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                     size divided by the number of attention heads divided by 2.
+                 `low_freq_factor` (:obj:`float`, `optional`):
+                     Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE.
+                 `high_freq_factor` (:obj:`float`, `optional`):
+                     Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE.
+         embed_dropout (:obj:`float`, `optional`, defaults to 0.0):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         attention_dropout (:obj:`float`, `optional`, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5):
+             The epsilon used by the layer normalization layers.
+         initializer_range (:obj:`float`, `optional`, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if ``config.is_decoder=True``.
+         bos_token_id (:obj:`int`, `optional`, defaults to 0):
+             Beginning of stream token id.
+         eos_token_id (:obj:`int`, `optional`, defaults to 2):
+             End of stream token id.
+         tie_word_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`):
+             Whether to tie weight embeddings.
+         gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
+             If True, use gradient checkpointing to save memory at the expense of a slower backward pass.
+     Example::
+         >>> from transformers import EXAONEModel, ExaoneConfig
+         >>> # Initializing an EXAONE configuration
+         >>> configuration = ExaoneConfig()
+         >>> # Initializing a model from the configuration
+         >>> model = EXAONEModel(configuration)
+         >>> # Accessing the model configuration
+         >>> configuration = model.config
+     """
+
+     model_type = "exaone"
+     keys_to_ignore_at_inference = ["past_key_values"]
+     attribute_map = {"num_hidden_layers": "num_layers"}
+
+     def __init__(
+         self,
+         vocab_size=102400,
+         max_position_embeddings=2048,
+         hidden_size=2048,
+         num_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         intermediate_size=None,
+         activation_function="silu",
+         rope_theta=10000.0,
+         rope_scaling=None,
+         embed_dropout=0.0,
+         attention_dropout=0.0,
+         layer_norm_epsilon=1e-5,
+         initializer_range=0.02,
+         use_cache=True,
+         bos_token_id=0,
+         eos_token_id=2,
+         tie_word_embeddings=True,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.num_layers = num_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_hidden_layers = num_layers
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+         if intermediate_size:
+             self.intermediate_size = intermediate_size
+         else:
+             self.intermediate_size = hidden_size * 4
+         self.activation_function = activation_function
+         self.embed_dropout = embed_dropout
+         self.attention_dropout = attention_dropout
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+
+         self.bos_token_id = bos_token_id
+         self.eos_token_id = eos_token_id
+
+         super().__init__(
+             bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
+         )
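A quick check of the defaulting logic in the `__init__` above (a sketch; the import path is taken from the file list and assumes the cached module resolves from the installed wheel):

```python
from optimum.rbln.transformers.models.exaone.hf_hub_cached.configuration_exaone import ExaoneConfig

cfg = ExaoneConfig(hidden_size=4096, num_layers=32, num_attention_heads=32)

# num_key_value_heads falls back to num_attention_heads (plain multi-head attention).
assert cfg.num_key_value_heads == 32
# intermediate_size falls back to hidden_size * 4.
assert cfg.intermediate_size == 4 * 4096
# attribute_map maps num_hidden_layers onto num_layers, so both names agree.
assert cfg.num_hidden_layers == cfg.num_layers == 32
```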