optimum-rbln 0.9.3rc0 (py3-none-any.whl) → 0.9.5a4 (py3-none-any.whl)

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only.
Files changed (157)
  1. optimum/rbln/__init__.py +48 -0
  2. optimum/rbln/__version__.py +2 -2
  3. optimum/rbln/configuration_utils.py +50 -21
  4. optimum/rbln/diffusers/__init__.py +12 -0
  5. optimum/rbln/diffusers/configurations/__init__.py +3 -0
  6. optimum/rbln/diffusers/configurations/models/__init__.py +2 -0
  7. optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl_temporal_decoder.py +67 -0
  8. optimum/rbln/diffusers/configurations/models/configuration_unet_spatio_temporal_condition.py +59 -0
  9. optimum/rbln/diffusers/configurations/pipelines/__init__.py +3 -0
  10. optimum/rbln/diffusers/configurations/pipelines/configuration_stable_video_diffusion.py +114 -0
  11. optimum/rbln/diffusers/modeling_diffusers.py +1 -1
  12. optimum/rbln/diffusers/models/__init__.py +17 -3
  13. optimum/rbln/diffusers/models/autoencoders/__init__.py +1 -0
  14. optimum/rbln/diffusers/models/autoencoders/autoencoder_kl_cosmos.py +3 -3
  15. optimum/rbln/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +275 -0
  16. optimum/rbln/diffusers/models/autoencoders/vae.py +27 -8
  17. optimum/rbln/diffusers/models/controlnet.py +17 -2
  18. optimum/rbln/diffusers/models/transformers/prior_transformer.py +16 -2
  19. optimum/rbln/diffusers/models/transformers/transformer_cosmos.py +16 -1
  20. optimum/rbln/diffusers/models/transformers/transformer_sd3.py +14 -1
  21. optimum/rbln/diffusers/models/unets/__init__.py +1 -0
  22. optimum/rbln/diffusers/models/unets/unet_2d_condition.py +18 -2
  23. optimum/rbln/diffusers/models/unets/unet_spatio_temporal_condition.py +201 -0
  24. optimum/rbln/diffusers/pipelines/__init__.py +4 -0
  25. optimum/rbln/diffusers/pipelines/auto_pipeline.py +2 -2
  26. optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +20 -0
  27. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +13 -4
  28. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +13 -4
  29. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +13 -4
  30. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +13 -4
  31. optimum/rbln/diffusers/pipelines/cosmos/cosmos_guardrail.py +1 -1
  32. optimum/rbln/diffusers/pipelines/cosmos/pipeline_cosmos_text2world.py +1 -1
  33. optimum/rbln/diffusers/pipelines/cosmos/pipeline_cosmos_video2world.py +1 -2
  34. optimum/rbln/diffusers/pipelines/stable_video_diffusion/__init__.py +15 -0
  35. optimum/rbln/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +46 -0
  36. optimum/rbln/modeling.py +20 -45
  37. optimum/rbln/modeling_base.py +18 -14
  38. optimum/rbln/ops/__init__.py +1 -0
  39. optimum/rbln/ops/attn.py +10 -0
  40. optimum/rbln/ops/flash_attn.py +8 -0
  41. optimum/rbln/ops/moe.py +180 -0
  42. optimum/rbln/ops/sliding_window_attn.py +9 -0
  43. optimum/rbln/transformers/__init__.py +36 -0
  44. optimum/rbln/transformers/configuration_generic.py +0 -27
  45. optimum/rbln/transformers/modeling_attention_utils.py +156 -127
  46. optimum/rbln/transformers/modeling_generic.py +2 -61
  47. optimum/rbln/transformers/modeling_outputs.py +26 -0
  48. optimum/rbln/transformers/modeling_rope_utils.py +78 -42
  49. optimum/rbln/transformers/models/__init__.py +28 -0
  50. optimum/rbln/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +28 -2
  51. optimum/rbln/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +68 -5
  52. optimum/rbln/transformers/models/auto/auto_factory.py +1 -0
  53. optimum/rbln/transformers/models/bart/bart_architecture.py +24 -24
  54. optimum/rbln/transformers/models/bart/modeling_bart.py +23 -2
  55. optimum/rbln/transformers/models/bert/modeling_bert.py +86 -1
  56. optimum/rbln/transformers/models/blip_2/modeling_blip_2.py +42 -15
  57. optimum/rbln/transformers/models/clip/modeling_clip.py +40 -2
  58. optimum/rbln/transformers/models/colpali/colpali_architecture.py +14 -20
  59. optimum/rbln/transformers/models/colpali/configuration_colpali.py +12 -17
  60. optimum/rbln/transformers/models/colpali/modeling_colpali.py +66 -221
  61. optimum/rbln/transformers/models/colqwen2/configuration_colqwen2.py +38 -23
  62. optimum/rbln/transformers/models/colqwen2/modeling_colqwen2.py +107 -371
  63. optimum/rbln/transformers/models/decoderonly/__init__.py +2 -0
  64. optimum/rbln/transformers/models/decoderonly/configuration_decoderonly.py +128 -17
  65. optimum/rbln/transformers/models/decoderonly/configuration_lora.py +2 -2
  66. optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +211 -89
  67. optimum/rbln/transformers/models/decoderonly/decoderonly_runtime_utils.py +205 -64
  68. optimum/rbln/transformers/models/decoderonly/generation_decoderonly.py +17 -9
  69. optimum/rbln/transformers/models/decoderonly/lora_architecture.py +1 -1
  70. optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +194 -132
  71. optimum/rbln/transformers/models/depth_anything/modeling_depth_anything.py +17 -0
  72. optimum/rbln/transformers/models/distilbert/modeling_distilbert.py +24 -0
  73. optimum/rbln/transformers/models/dpt/modeling_dpt.py +17 -0
  74. optimum/rbln/transformers/models/exaone/exaone_architecture.py +0 -36
  75. optimum/rbln/transformers/models/gemma/gemma_architecture.py +1 -1
  76. optimum/rbln/transformers/models/gemma2/__init__.py +16 -0
  77. optimum/rbln/transformers/models/gemma2/configuration_gemma2.py +45 -0
  78. optimum/rbln/transformers/models/gemma2/gemma2_architecture.py +83 -0
  79. optimum/rbln/transformers/models/gemma2/modeling_gemma2.py +101 -0
  80. optimum/rbln/transformers/models/gemma3/gemma3_architecture.py +23 -19
  81. optimum/rbln/transformers/models/gemma3/gemma3_runtime_utils.py +42 -70
  82. optimum/rbln/transformers/models/gemma3/modeling_gemma3.py +46 -31
  83. optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +8 -34
  84. optimum/rbln/transformers/models/gpt_oss/__init__.py +16 -0
  85. optimum/rbln/transformers/models/gpt_oss/configuration_gpt_oss.py +41 -0
  86. optimum/rbln/transformers/models/gpt_oss/gpt_oss_architecture.py +122 -0
  87. optimum/rbln/transformers/models/gpt_oss/modeling_gpt_oss.py +165 -0
  88. optimum/rbln/transformers/models/grounding_dino/configuration_grounding_dino.py +8 -5
  89. optimum/rbln/transformers/models/grounding_dino/grounding_dino_architecture.py +7 -5
  90. optimum/rbln/transformers/models/grounding_dino/modeling_grounding_dino.py +24 -9
  91. optimum/rbln/transformers/models/idefics3/modeling_idefics3.py +3 -5
  92. optimum/rbln/transformers/models/llava/modeling_llava.py +37 -26
  93. optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +3 -5
  94. optimum/rbln/transformers/models/midm/midm_architecture.py +29 -22
  95. optimum/rbln/transformers/models/mistral/modeling_mistral.py +0 -22
  96. optimum/rbln/transformers/models/opt/modeling_opt.py +2 -2
  97. optimum/rbln/transformers/models/opt/opt_architecture.py +1 -44
  98. optimum/rbln/transformers/models/paligemma/__init__.py +16 -0
  99. optimum/rbln/transformers/models/paligemma/configuration_paligemma.py +129 -0
  100. optimum/rbln/transformers/models/paligemma/modeling_paligemma.py +564 -0
  101. optimum/rbln/transformers/models/pegasus/modeling_pegasus.py +1 -1
  102. optimum/rbln/transformers/models/pegasus/pegasus_architecture.py +24 -24
  103. optimum/rbln/transformers/models/phi/phi_architecture.py +13 -21
  104. optimum/rbln/transformers/models/pixtral/modeling_pixtral.py +13 -1
  105. optimum/rbln/transformers/models/pixtral/pixtral_architecture.py +2 -2
  106. optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +0 -28
  107. optimum/rbln/transformers/models/qwen2_5_vl/__init__.py +6 -1
  108. optimum/rbln/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +11 -1
  109. optimum/rbln/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +278 -130
  110. optimum/rbln/transformers/models/qwen2_5_vl/qwen2_5_vl_architecture.py +43 -39
  111. optimum/rbln/transformers/models/qwen2_moe/__init__.py +16 -0
  112. optimum/rbln/transformers/models/qwen2_moe/configuration_qwen2_moe.py +38 -0
  113. optimum/rbln/transformers/models/qwen2_moe/modeling_qwen2_moe.py +68 -0
  114. optimum/rbln/transformers/models/qwen2_moe/qwen2_moe_architecture.py +94 -0
  115. optimum/rbln/transformers/models/qwen2_vl/__init__.py +6 -1
  116. optimum/rbln/transformers/models/qwen2_vl/configuration_qwen2_vl.py +11 -1
  117. optimum/rbln/transformers/models/qwen2_vl/modeling_qwen2_vl.py +268 -111
  118. optimum/rbln/transformers/models/qwen2_vl/qwen2_vl_architecture.py +27 -35
  119. optimum/rbln/transformers/models/qwen3/modeling_qwen3.py +0 -20
  120. optimum/rbln/transformers/models/qwen3/qwen3_architecture.py +7 -7
  121. optimum/rbln/transformers/models/qwen3_moe/__init__.py +16 -0
  122. optimum/rbln/transformers/models/qwen3_moe/configuration_qwen3_moe.py +38 -0
  123. optimum/rbln/transformers/models/qwen3_moe/modeling_qwen3_moe.py +68 -0
  124. optimum/rbln/transformers/models/qwen3_moe/qwen3_moe_architecture.py +100 -0
  125. optimum/rbln/transformers/models/resnet/configuration_resnet.py +17 -0
  126. optimum/rbln/transformers/models/resnet/modeling_resnet.py +73 -0
  127. optimum/rbln/transformers/models/roberta/modeling_roberta.py +33 -0
  128. optimum/rbln/transformers/models/seq2seq/configuration_seq2seq.py +2 -4
  129. optimum/rbln/transformers/models/seq2seq/modeling_seq2seq.py +36 -12
  130. optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py +14 -12
  131. optimum/rbln/transformers/models/siglip/modeling_siglip.py +21 -19
  132. optimum/rbln/transformers/models/swin/configuration_swin.py +1 -6
  133. optimum/rbln/transformers/models/swin/modeling_swin.py +17 -4
  134. optimum/rbln/transformers/models/t5/modeling_t5.py +2 -2
  135. optimum/rbln/transformers/models/t5/t5_architecture.py +16 -17
  136. optimum/rbln/transformers/models/time_series_transformer/modeling_time_series_transformer.py +25 -10
  137. optimum/rbln/transformers/models/time_series_transformer/time_series_transformers_architecture.py +0 -3
  138. optimum/rbln/transformers/models/vit/modeling_vit.py +19 -0
  139. optimum/rbln/transformers/models/wav2vec2/configuration_wav2vec2.py +15 -3
  140. optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +60 -8
  141. optimum/rbln/transformers/models/whisper/generation_whisper.py +48 -14
  142. optimum/rbln/transformers/models/whisper/modeling_whisper.py +2 -2
  143. optimum/rbln/transformers/models/whisper/whisper_architecture.py +0 -3
  144. optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +53 -0
  145. optimum/rbln/transformers/utils/rbln_quantization.py +29 -12
  146. optimum/rbln/utils/deprecation.py +213 -0
  147. optimum/rbln/utils/hub.py +14 -3
  148. optimum/rbln/utils/import_utils.py +23 -2
  149. optimum/rbln/utils/runtime_utils.py +42 -6
  150. optimum/rbln/utils/submodule.py +27 -1
  151. {optimum_rbln-0.9.3rc0.dist-info → optimum_rbln-0.9.5a4.dist-info}/METADATA +6 -6
  152. {optimum_rbln-0.9.3rc0.dist-info → optimum_rbln-0.9.5a4.dist-info}/RECORD +155 -129
  153. {optimum_rbln-0.9.3rc0.dist-info → optimum_rbln-0.9.5a4.dist-info}/WHEEL +1 -1
  154. optimum/rbln/transformers/models/colqwen2/colqwen2_architecture.py +0 -233
  155. optimum/rbln/utils/depreacate_utils.py +0 -16
  156. {optimum_rbln-0.9.3rc0.dist-info → optimum_rbln-0.9.5a4.dist-info}/entry_points.txt +0 -0
  157. {optimum_rbln-0.9.3rc0.dist-info → optimum_rbln-0.9.5a4.dist-info}/licenses/LICENSE +0 -0
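Headline changes visible in the list: Stable Video Diffusion support lands in diffusers (a temporal-decoder VAE, a spatio-temporal UNet, and a new pipeline), several new transformers model families (gemma2, gpt_oss, paligemma, qwen2_moe, qwen3_moe) backed by new MoE kernels in optimum/rbln/ops/moe.py, and a rewritten deprecation utility. As a sketch of how a newly supported family is typically compiled with this library's from_pretrained(export=True) flow — the class name RBLNGemma2ForCausalLM is an assumption based on the RBLN* naming convention used throughout the package, and the rbln_* option values are illustrative:

from optimum.rbln import RBLNGemma2ForCausalLM  # assumed export of the new modeling_gemma2.py

# Compile a Gemma 2 checkpoint for RBLN NPUs, then save the compiled artifact.
model = RBLNGemma2ForCausalLM.from_pretrained(
    "google/gemma-2-2b-it",  # any Gemma 2 checkpoint
    export=True,             # trace and compile from the PyTorch weights
    rbln_max_seq_len=8192,   # illustrative compile-time options
    rbln_batch_size=1,
)
model.save_pretrained("gemma-2-2b-it-rbln")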
optimum/rbln/transformers/models/colqwen2/colqwen2_architecture.py (deleted)
@@ -1,233 +0,0 @@
- # Copyright 2025 Rebellions Inc. All rights reserved.
-
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at:
-
- #     http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from typing import List, Optional, Tuple, Union
-
- import torch
- import torch.nn as nn
- from transformers import PreTrainedModel
-
- from optimum.rbln.transformers.models.decoderonly.decoderonly_architecture import (
-     DecoderOnlyLayer,
-     DecoderOnlyModel,
-     DecoderOnlyWrapper,
- )
-
- from .configuration_colqwen2 import (
-     RBLNColQwen2ForRetrievalConfig,
- )
-
-
- def slice_and_unsqueeze_cos_sin(cos, sin, position_ids):
-     """Slice cos[cache_position], sin[cache_position] vector for the query."""
-     cos = cos[position_ids[0]][None, None, None, :, :]
-     sin = sin[position_ids[0]][None, None, None, :, :]
-
-     return cos, sin
-
-
- class ColQwen2LanguageModelWrapper(DecoderOnlyWrapper):
-     def __init__(
-         self, model: PreTrainedModel, rbln_config: "RBLNColQwen2ForRetrievalConfig", use_rotary_emb: bool = True
-     ):
-         model.config = (
-             model.config.vlm_config.text_config if hasattr(model.config, "vlm_config") else model.config.text_config
-         )
-         super().__init__(model, rbln_config, use_rotary_emb)
-
-     def get_decoder_layers(self, model: PreTrainedModel):
-         return model.language_model.layers
-
-     def convert_to_rbln_class(self, model: PreTrainedModel, max_seq_len: int):
-         new_layers = []
-         for layer_idx, layer in enumerate(self.get_decoder_layers(model)):
-             is_sliding = layer_idx in self.rbln_config.sliding_window_layers
-             new_self_attn = self.get_rbln_attn_class()(
-                 self.get_attn_layer(layer),
-                 self.rbln_config,
-                 is_sliding=is_sliding,
-             )
-             new_layer = self.get_rbln_layer_class()(layer, new_self_attn)
-             new_layers.append(new_layer)
-
-         new_model = self.get_rbln_model_class()(
-             model.language_model,
-             new_layers,
-             self.rbln_config,
-             use_learned_pos_emb=self.__class__._use_learned_pos_emb,
-         )
-
-         # text_projection layer from model
-         self.embedding_proj_layer = (
-             model.embedding_proj_layer if hasattr(model, "embedding_proj_layer") else model.custom_text_proj
-         )
-         return new_model
-
-     def get_rbln_model_class(self):
-         return RBLNColQwen2LanguageModel
-
-     def prepare_forward_args(self, *args):
-         args = list(args)
-         input_ids = None if self.rbln_config.use_inputs_embeds else args.pop(0)
-         inputs_embeds = args.pop(0) if self.rbln_config.use_inputs_embeds else None
-         cache_position = args.pop(0)
-         global_block_tables = args.pop(0)
-         local_block_tables = None
-         position_embeds = args.pop(0)
-         position_ids = None
-         attention_mask = args.pop(0) if self.rbln_config.use_attention_mask else None
-         past_key_values = args
-
-         if len(past_key_values) != 2 * self.num_hidden_layers:
-             raise ValueError(
-                 f"Different past_key_values to model's config. {len(past_key_values)} != {2 * self.num_hidden_layers}"
-             )
-
-         _past_key_values = []
-         for i in range(self.config.num_hidden_layers):
-             key_states = past_key_values[i * 2]
-             value_states = past_key_values[i * 2 + 1]
-             past_key_value = [key_states, value_states]
-             _past_key_values.append(past_key_value)
-         past_key_values = _past_key_values
-
-         return (
-             input_ids,
-             inputs_embeds,
-             cache_position,
-             global_block_tables,
-             local_block_tables,
-             attention_mask,
-             position_ids,
-             past_key_values,
-             position_embeds,
-         )
-
-     def forward(self, *args):
-         (
-             input_ids,
-             inputs_embeds,
-             cache_position,
-             global_block_tables,
-             local_block_tables,
-             attention_mask,
-             position_ids,
-             past_key_values,
-             rotary_emb,
-         ) = self.prepare_forward_args(*args)
-
-         last_hidden_states = self.model(
-             input_ids=input_ids,
-             inputs_embeds=inputs_embeds,
-             attention_mask=attention_mask,
-             cache_position=cache_position,
-             position_ids=position_ids,
-             past_key_values=past_key_values,
-             rotary_emb=rotary_emb,
-             global_block_tables=global_block_tables,
-             local_block_tables=local_block_tables,
-         )
-
-         proj = self.embedding_proj_layer(last_hidden_states[0])
-         all_hidden_states = last_hidden_states[1] if self.rbln_config.output_hidden_states else None
-
-         if self.rbln_config.output_hidden_states:
-             return proj, all_hidden_states
-         else:
-             return proj
-
-
- class RBLNColQwen2LanguageModel(DecoderOnlyModel):
-     def __init__(
-         self,
-         model,
-         layers: List["DecoderOnlyLayer"],
-         rbln_config: "RBLNColQwen2ForRetrievalConfig",
-         use_learned_pos_emb=None,
-     ):
-         super().__init__(model, layers, rbln_config, use_learned_pos_emb)
-
-         self.output_hidden_states = rbln_config.output_hidden_states
-
-     def forward(
-         self,
-         input_ids: torch.Tensor = None,
-         inputs_embeds: Optional[torch.Tensor] = None,
-         attention_mask: torch.Tensor = None,
-         cache_position: torch.Tensor = None,
-         position_ids: torch.Tensor = None,
-         query_position: torch.Tensor = None,
-         past_key_values: Tuple[Tuple[torch.Tensor]] = None,
-         rotary_emb: Optional[Union[nn.Module, torch.Tensor]] = None,
-         global_block_tables: Optional[torch.Tensor] = None,
-         local_block_tables: Optional[torch.Tensor] = None,
-         lora_int_id: Optional[torch.Tensor] = None,
-     ):
-         # retrieve input_ids and inputs_embeds
-         if (input_ids is None) ^ (inputs_embeds is not None):
-             raise ValueError(
-                 "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
-             )
-
-         # embed positions
-         if inputs_embeds is None:
-             inputs_embeds = self.get_embedding()(input_ids)
-
-         hidden_states = inputs_embeds * self.hidden_multiplier
-
-         # get cos,sin vector if needed
-         position_ids = position_ids if position_ids is not None else cache_position
-         if rotary_emb is not None:
-             if isinstance(rotary_emb, torch.Tensor):
-                 cos = rotary_emb[0]
-                 sin = rotary_emb[1]
-             else:
-                 cos, sin = rotary_emb(hidden_states, self.max_seq_len)  # dtype carrier, max_seq_len
-                 cos, sin = slice_and_unsqueeze_cos_sin(cos, sin, position_ids)
-
-         # Get sequence positions for flash attention
-         if self.attn_impl == "flash_attn":
-             seq_positions = cache_position[:, 0]
-             seq_positions = self.convert_sequence_positions_for_flash_attn(
-                 seq_positions=seq_positions, max_seq_len=self.max_seq_len
-             )
-         else:
-             seq_positions = cache_position[:, :1]
-
-         # Get local cache positions for sliding window layers
-         if len(self.sliding_window_layers) > 0:
-             sliding_cache_pos = self.get_local_cache_positions(position_ids, query_position)
-
-         all_hidden_states = () if self.output_hidden_states else None
-         for layer_idx, layer in enumerate(self.layers):
-             if self.output_hidden_states:
-                 all_hidden_states += (hidden_states,)
-
-             is_sliding = True if layer_idx in self.sliding_window_layers else False
-             hidden_states = layer(
-                 hidden_states=hidden_states,
-                 attention_mask=attention_mask,
-                 seq_positions=sliding_cache_pos if is_sliding else seq_positions,
-                 past_key_values=past_key_values,
-                 cos=cos,
-                 sin=sin,
-                 block_tables=local_block_tables if is_sliding else global_block_tables,
-                 lora_int_id=lora_int_id,
-             )
-
-         hidden_states = self.get_last_layernorm()(hidden_states)
-         if self.output_hidden_states:
-             all_hidden_states += (hidden_states,)
-
-         return hidden_states, all_hidden_states
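The wrapper removed above duplicated the shared decoder-only stack for ColQwen2; per the file list, modeling_colqwen2.py also shrank by roughly 260 net lines in this release, which suggests the logic was folded into the common decoderonly implementation rather than dropped. One piece worth understanding is slice_and_unsqueeze_cos_sin: it selects the rotary cos/sin rows for the current positions and adds broadcast axes. A minimal, self-contained sketch of that same indexing (shapes are illustrative, not taken from the release):

import torch

# Rotary table: one row per absolute position, one column per rotary dim.
cos = torch.randn(4096, 128)              # (max_seq_len, head_dim)
position_ids = torch.tensor([[5, 6, 7]])  # (batch, query_len)

# Same indexing as the removed helper: select rows for the first batch
# element's positions, then add three leading broadcast axes.
cos_sliced = cos[position_ids[0]][None, None, None, :, :]
print(cos_sliced.shape)                   # torch.Size([1, 1, 1, 3, 128])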
optimum/rbln/utils/depreacate_utils.py (deleted)
@@ -1,16 +0,0 @@
- from typing import Optional
-
- import rebel
-
- from .logging import get_logger
-
-
- logger = get_logger(__name__)
-
-
- def warn_deprecated_npu(npu: Optional[str] = None):
-     npu = npu or rebel.get_npu_name()
-     if npu == "RBLN-CA02":
-         logger.warning_once(
-             "Support for the RBLN-CA02 device is provided only up to optimum-rbln v0.8.0 and has reached end of life.",
-         )
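This removal pairs with the new optimum/rbln/utils/deprecation.py (+213 lines) in the file list, so the deprecation machinery was rewritten under a correctly spelled module name rather than dropped. For reference, the removed helper behaved as follows; the import path below is the old, now-deleted one, and the replacement API in utils/deprecation.py is not shown in this diff, so it is not sketched here:

from optimum.rbln.utils.depreacate_utils import warn_deprecated_npu  # pre-0.9.5 path

warn_deprecated_npu()             # auto-detects the device via rebel.get_npu_name()
warn_deprecated_npu("RBLN-CA02")  # warns once: CA02 support ended at optimum-rbln v0.8.0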