optimum-rbln 0.7.4a5__py3-none-any.whl → 0.7.4a7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- optimum/rbln/__init__.py +16 -0
- optimum/rbln/__version__.py +2 -2
- optimum/rbln/modeling_base.py +22 -3
- optimum/rbln/transformers/__init__.py +16 -0
- optimum/rbln/transformers/models/__init__.py +24 -0
- optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +67 -41
- optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +198 -99
- optimum/rbln/transformers/models/idefics3/__init__.py +16 -0
- optimum/rbln/transformers/models/idefics3/configuration_idefics3.py +51 -0
- optimum/rbln/transformers/models/idefics3/modeling_idefics3.py +459 -0
- optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +6 -0
- optimum/rbln/transformers/models/qwen2_5_vl/__init__.py +19 -0
- optimum/rbln/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +68 -0
- optimum/rbln/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +608 -0
- optimum/rbln/transformers/models/qwen2_5_vl/qwen2_5_vl_architecture.py +214 -0
- optimum/rbln/utils/runtime_utils.py +33 -2
- optimum/rbln/utils/submodule.py +10 -1
- {optimum_rbln-0.7.4a5.dist-info → optimum_rbln-0.7.4a7.dist-info}/METADATA +1 -1
- {optimum_rbln-0.7.4a5.dist-info → optimum_rbln-0.7.4a7.dist-info}/RECORD +21 -14
- {optimum_rbln-0.7.4a5.dist-info → optimum_rbln-0.7.4a7.dist-info}/WHEEL +0 -0
- {optimum_rbln-0.7.4a5.dist-info → optimum_rbln-0.7.4a7.dist-info}/licenses/LICENSE +0 -0
optimum/rbln/transformers/models/idefics3/modeling_idefics3.py

```diff
@@ -0,0 +1,459 @@
+# Copyright 2025 Rebellions Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+import inspect
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable, Optional, Tuple, Union
+
+import rebel
+import torch
+from transformers import (
+    AutoModelForVision2Seq,
+    Idefics3ForConditionalGeneration,
+    Idefics3VisionConfig,
+    Idefics3VisionTransformer,
+    PretrainedConfig,
+    PreTrainedModel,
+)
+from transformers.modeling_outputs import BaseModelOutput
+from transformers.modeling_utils import no_init_weights
+from transformers.models.idefics3.modeling_idefics3 import Idefics3CausalLMOutputWithPast, Idefics3VisionEmbeddings
+
+from ....configuration_utils import RBLNCompileConfig, RBLNModelConfig
+from ....modeling import RBLNModel
+from ....utils.runtime_utils import RBLNPytorchRuntime
+from ..decoderonly.modeling_decoderonly import (
+    RBLNDecoderOnlyOutput,
+)
+
+
+if TYPE_CHECKING:
+    from transformers import (
+        AutoFeatureExtractor,
+        AutoProcessor,
+        AutoTokenizer,
+    )
+
+
+class RBLNRuntimeVisionModel(RBLNPytorchRuntime):
+    mandatory_members = ["main_input_name"]
+
+    def __init__(
+        self,
+        runtime: rebel.Runtime,
+        config: Idefics3VisionConfig,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(runtime, **kwargs)
+        self.patch_size = config.patch_size
+        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+
+    def forward(
+        self,
+        pixel_values,
+        patch_attention_mask: Optional[torch.BoolTensor] = None,
+        return_dict: Optional[bool] = None,
+        **kwargs,
+    ):
+        batch_size = pixel_values.size(0)
+        if patch_attention_mask is None:
+            patch_size = self.patch_size
+            patch_attention_mask = torch.ones(
+                (
+                    batch_size,
+                    pixel_values.size(2) // patch_size,
+                    pixel_values.size(3) // patch_size,
+                )
+            )
+            patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device)
+
+        hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
+
+        return super().forward(hidden_states.contiguous())
+
+
+class RBLNIdefics3VisionTransformer(RBLNModel):
+    def __post_init__(self, **kwargs):
+        artifacts = torch.load(self.model_save_dir / self.subfolder / "torch_artifacts.pth", weights_only=False)
+        with no_init_weights():
+            self.embeddings = Idefics3VisionEmbeddings(self.config)
+        self.embeddings.load_state_dict(artifacts["embeddings"])
+        self.model = RBLNRuntimeVisionModel(
+            self.model[0], main_input_name="pixel_values", config=self.config, embeddings=self.embeddings
+        )
+
+    @classmethod
+    def save_torch_artifacts(
+        cls,
+        model: "PreTrainedModel",
+        save_dir_path: Path,
+        subfolder: str,
+        rbln_config: RBLNModelConfig,
+    ):
+        """
+        If you are unavoidably running on a CPU rather than an RBLN device,
+        store the torch tensor, weight, etc. in this function.
+        """
+        save_dict = {}
+        save_dict["embeddings"] = model.get_input_embeddings().state_dict()
+        torch.save(save_dict, save_dir_path / subfolder / "torch_artifacts.pth")
+
+    def get_input_embeddings(self):
+        return self.embeddings
+
+    @classmethod
+    def wrap_model_if_needed(cls, model: torch.nn.Module, rbln_config: RBLNModelConfig) -> torch.nn.Module:
+        class Idefics3VisionTransformerWrapper(torch.nn.Module):
+            def __init__(self, model: "Idefics3VisionTransformer"):
+                super().__init__()
+                self.encoder = model.encoder
+                self.post_layernorm = model.post_layernorm
+
+            def forward(self, hidden_states, patch_attention_mask: Optional[torch.BoolTensor] = None):
+                encoder_outputs = self.encoder(
+                    inputs_embeds=hidden_states,
+                    attention_mask=patch_attention_mask,
+                    output_attentions=None,
+                    output_hidden_states=None,
+                    return_dict=False,
+                )
+                last_hidden_state = encoder_outputs[0]
+                last_hidden_state = self.post_layernorm(last_hidden_state)
+                return last_hidden_state
+
+        return Idefics3VisionTransformerWrapper(model).eval()
+
+    @classmethod
+    def _update_rbln_config(
+        cls,
+        preprocessors: Optional[Union["AutoFeatureExtractor", "AutoProcessor", "AutoTokenizer"]],
+        model: Optional["PreTrainedModel"] = None,
+        model_config: Optional["PretrainedConfig"] = None,
+        rbln_config: Optional[RBLNModelConfig] = None,
+    ) -> RBLNModelConfig:
+        input_info = [
+            (
+                "hidden_states",
+                [
+                    # batch_size * num_patches (dependent on image size) -> compile with 1 and use for loop
+                    1,
+                    (model_config.image_size // model_config.patch_size) ** 2,
+                    model_config.hidden_size,
+                ],
+                "float32",
+            ),
+        ]
+
+        rbln_compile_config = RBLNCompileConfig(input_info=input_info)
+        rbln_config.set_compile_cfgs([rbln_compile_config])
+        return rbln_config
+
+    def forward(
+        self,
+        pixel_values,
+        patch_attention_mask: Optional[torch.BoolTensor] = None,
+        return_dict: Optional[bool] = None,
+        **kwargs,
+    ) -> Union[Tuple, BaseModelOutput]:
+        batch_size = pixel_values.shape[0]
+        last_hidden_state = []
+        for i in range(batch_size):
+            if patch_attention_mask is not None:
+                batch_attention_mask = patch_attention_mask[i : i + 1,]
+            else:
+                batch_attention_mask = None
+
+            batch_hidden_state = self.model(
+                pixel_values[i : i + 1,],
+                batch_attention_mask,
+                return_dict=False,
+            )
+            last_hidden_state.append(batch_hidden_state)
+        last_hidden_state = torch.cat(last_hidden_state, dim=0)
+
+        if not return_dict:
+            return (last_hidden_state,)
+        else:
+            return BaseModelOutput(last_hidden_state=last_hidden_state)
+
+
+class RBLNIdefics3ForConditionalGeneration(RBLNModel):
+    auto_model_class = AutoModelForVision2Seq
+    _rbln_submodules = [{"name": "vision_model"}, {"name": "text_model"}]
+    _rbln_submodule_prefix = "model"
+
+    def __getattr__(self, __name: str) -> Any:
+        def redirect(func):
+            return lambda *pargs, **kwargs: func(self, *pargs, **kwargs)
+
+        val = getattr(Idefics3ForConditionalGeneration, __name)
+
+        if isinstance(val, Callable) and "self" in set(inspect.signature(val).parameters):
+            return redirect(val)
+        return val
+
+    def can_generate(self):
+        return True
+
+    @classmethod
+    def get_pytorch_model(cls, *args, **kwargs):
+        model = super().get_pytorch_model(*args, **kwargs)
+
+        with no_init_weights():
+            model_cls_name = model.model.text_model.__class__.__name__
+            causal_model_cls_name = model_cls_name.replace("Model", "ForCausalLM")
+            causal_model_cls = getattr(importlib.import_module("transformers"), causal_model_cls_name)
+            new_text_model = causal_model_cls(model.model.text_model.config)
+
+        new_text_model.lm_head = model.lm_head
+        new_text_model.model = model.model.text_model
+        model.model.text_model = new_text_model
+        model.lm_head = None
+        del model.lm_head
+        return model
+
+    def __post_init__(self, **kwargs):
+        self.vision_model = self.rbln_submodules[0]
+        self.connector = self.model[0]
+        self.text_model = self.rbln_submodules[1]
+
+    def get_attn_impl(self) -> str:
+        return self.rbln_config.text_model.attn_impl
+
+    def get_kvcache_num_blocks(self) -> int:
+        return self.rbln_config.text_model.kvcache_num_blocks
+
+    def get_input_embeddings(self):
+        return self.text_model.get_input_embeddings()
+
+    @classmethod
+    def wrap_model_if_needed(cls, model, rbln_config):
+        return model.model.connector
+
+    @classmethod
+    def _update_rbln_config(
+        cls,
+        preprocessors: Optional[Union["AutoFeatureExtractor", "AutoProcessor", "AutoTokenizer"]],
+        model: Optional["PreTrainedModel"] = None,
+        model_config: Optional["PretrainedConfig"] = None,
+        rbln_config: Optional[RBLNModelConfig] = None,
+    ) -> RBLNModelConfig:
+        input_info = [
+            (
+                "image_hidden_states",
+                [
+                    # batch_size * num_patches (dependent on image size) -> compile with 1 and use for loop
+                    1,
+                    (model_config.vision_config.image_size // model_config.vision_config.patch_size) ** 2,
+                    model_config.vision_config.hidden_size,
+                ],
+                "float32",
+            ),
+        ]
+
+        rbln_compile_config = RBLNCompileConfig(input_info=input_info)
+        rbln_config.set_compile_cfgs([rbln_compile_config])
+
+        return rbln_config
+
+    def prepare_inputs_for_generation(
+        self,
+        input_ids,
+        attention_mask=None,
+        inputs_embeds=None,
+        cache_position=None,
+        pixel_values=None,
+        pixel_attention_mask=None,
+        image_hidden_states=None,
+        generate_idx=None,
+        **kwargs,
+    ):
+        is_prefill_phase = generate_idx is None
+        model_inputs = {}
+
+        if is_prefill_phase:
+            generate_idx = attention_mask.sum(dim=-1, keepdim=True).int()
+            cache_position = None
+            pixel_values = pixel_values
+            pixel_attention_mask = pixel_attention_mask
+        else:
+            if inputs_embeds is not None:
+                raise NotImplementedError("Specifying inputs_embeds in decoder phase is not supported.")
+
+            pixel_values = None
+            pixel_attention_mask = None
+            input_ids = input_ids[:, -1:]
+            cache_position = generate_idx
+            generate_idx = generate_idx + 1
+            model_inputs.update({"input_ids": input_ids})
+
+        if inputs_embeds is not None:
+            if self.rbln_config.use_inputs_embeds:
+                model_inputs.update({"inputs_embeds": inputs_embeds})
+            else:
+                raise ValueError(
+                    "The specifying inputs_embeds is only supported when using a compiled RBLN model with 'rbln_use_inputs_embeds' set to True."
+                )
+        else:
+            model_inputs.update({"input_ids": input_ids})
+
+        model_inputs.update(
+            {
+                "attention_mask": attention_mask,
+                "pixel_values": pixel_values,
+                "pixel_attention_mask": pixel_attention_mask,
+                "image_hidden_states": image_hidden_states,
+                "cache_position": cache_position,
+                "generate_idx": generate_idx,
+            }
+        )
+        return model_inputs
+
+    def _update_model_kwargs_for_generation(self, outputs, model_kwargs, is_encoder_decoder, **kwargs):
+        model_kwargs["generate_idx"] = outputs.generate_idx
+        return model_kwargs
+
+    def inputs_merger(
+        self,
+        input_ids: torch.LongTensor,
+        inputs_embeds: Optional[torch.Tensor],
+        image_hidden_states: Optional[torch.Tensor],
+    ):
+        num_images, _, vision_hidden_size = image_hidden_states.shape
+        special_image_token_mask = input_ids == self.config.image_token_id
+        new_inputs_embeds = inputs_embeds.clone()
+        reshaped_image_hidden_states = image_hidden_states.view(-1, vision_hidden_size)
+        reshaped_image_hidden_states = reshaped_image_hidden_states.to(inputs_embeds.device, inputs_embeds.dtype)
+        new_inputs_embeds[special_image_token_mask] = reshaped_image_hidden_states
+        return new_inputs_embeds
+
+    def _preprocess_prefill(
+        self,
+        input_ids: torch.LongTensor = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        pixel_values: Optional[torch.FloatTensor] = None,
+        pixel_attention_mask: Optional[torch.BoolTensor] = None,
+        image_hidden_states: Optional[torch.FloatTensor] = None,
+        **kwargs,
+    ):
+        if input_ids is not None:
+            batch_size, _ = input_ids.shape
+        elif inputs_embeds is not None:
+            batch_size, _, _ = inputs_embeds.shape
+        else:
+            raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+        if inputs_embeds is not None and input_ids is None:
+            raise ValueError("When first calling the model, if input_embeds are passed, input_ids should not be None.")
+
+        if inputs_embeds is None:
+            inputs_embeds = self.get_input_embeddings()(input_ids).to(self.device)
+
+        if pixel_values is not None and image_hidden_states is not None:
+            raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
+
+        elif pixel_values is not None:
+            batch_size, num_images, num_channels, height, width = pixel_values.shape
+            pixel_values = pixel_values.to(dtype=self.dtype)
+            pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])
+
+            nb_values_per_image = pixel_values.shape[1:].numel()
+            real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image
+            pixel_values = pixel_values[real_images_inds].contiguous()
+
+            if pixel_attention_mask is None:
+                pixel_attention_mask = torch.ones(
+                    size=(pixel_values.size(0), pixel_values.size(2), pixel_values.size(3)),
+                    dtype=torch.bool,
+                    device=pixel_values.device,
+                )
+            else:
+                pixel_attention_mask = pixel_attention_mask.view(
+                    batch_size * num_images, *pixel_attention_mask.shape[2:]
+                )
+                pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()
+
+            patch_size = self.config.vision_config.patch_size
+            patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
+            patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
+            patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()
+
+            image_hidden_states = self.vision_model(
+                pixel_values=pixel_values, patch_attention_mask=patch_attention_mask, return_dict=True
+            ).last_hidden_state
+
+            connector_outputs = []
+            for i in range(image_hidden_states.shape[0]):
+                connector_outputs.append(self.connector(image_hidden_states[i : i + 1,]))
+            image_hidden_states = torch.cat(connector_outputs, dim=0)
+
+        elif image_hidden_states is not None:
+            image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)
+
+        if inputs_embeds is not None and image_hidden_states is not None:
+            inputs_embeds = self.inputs_merger(
+                input_ids=input_ids,
+                inputs_embeds=inputs_embeds,
+                image_hidden_states=image_hidden_states,
+            )
+
+        return inputs_embeds
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        pixel_values: Optional[torch.FloatTensor] = None,
+        pixel_attention_mask: Optional[torch.BoolTensor] = None,
+        image_hidden_states: Optional[torch.FloatTensor] = None,
+        cache_position: torch.Tensor = None,
+        generate_idx: Optional[torch.Tensor] = None,
+        **kwargs,
+    ) -> Union[Tuple, Idefics3CausalLMOutputWithPast]:
+        # Prefill
+        if cache_position is None:
+            inputs_embeds = self._preprocess_prefill(
+                input_ids, inputs_embeds, pixel_values, pixel_attention_mask, image_hidden_states
+            )
+            logits = []
+            inputs = inputs_embeds if inputs_embeds is not None else input_ids
+            batch_size = inputs.shape[0]
+
+            for b_idx in range(batch_size):
+                cache_position = torch.arange(0, generate_idx[b_idx].item(), dtype=torch.int32).unsqueeze(0)
+                logit = self.text_model.prefill_decoder(
+                    input_ids=inputs[b_idx : b_idx + 1] if inputs_embeds is None else None,
+                    inputs_embeds=inputs[b_idx : b_idx + 1] if inputs_embeds is not None else None,
+                    attention_mask=attention_mask[b_idx] if attention_mask is not None else None,
+                    cache_position=cache_position,
+                    batch_idx=b_idx,
+                )
+                logits.append(logit)
+
+            logits = torch.cat(logits, dim=0)
+
+        # Decoder
+        else:
+            logits = self.text_model.decoder(
+                input_ids=input_ids,
+                inputs_embeds=inputs_embeds,
+                cache_position=cache_position,
+            )
+
+        return RBLNDecoderOnlyOutput(
+            logits=logits,
+            generate_idx=generate_idx,
+        )
```
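For orientation, here is a minimal, hypothetical usage sketch for the new Idefics3 support. The class name comes from the diff above; the checkpoint id, the processor calls, and the `export=True` compile-then-generate flow follow the usual optimum-rbln pattern and are assumptions, not part of this diff.

```python
# Hypothetical sketch only: checkpoint id, processor usage, and export/generate
# arguments are assumptions based on the usual optimum-rbln workflow.
from PIL import Image
from transformers import AutoProcessor

from optimum.rbln import RBLNIdefics3ForConditionalGeneration  # assumes the class is re-exported at the package root

model_id = "HuggingFaceM4/Idefics3-8B-Llama3"
processor = AutoProcessor.from_pretrained(model_id)

# export=True compiles the model for RBLN NPUs; the compiled artifacts can be
# saved with save_pretrained() and reloaded later without recompiling.
model = RBLNIdefics3ForConditionalGeneration.from_pretrained(model_id, export=True)

image = Image.new("RGB", (512, 512))  # placeholder image
messages = [
    {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Describe the image."}]},
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=prompt, images=[image], return_tensors="pt")

# generate() exercises the prefill/decode split in forward() above: the prefill branch
# runs once per sample (cache_position is None), then decode steps advance generate_idx.
output_ids = model.generate(**inputs, max_new_tokens=64)
print(processor.batch_decode(output_ids, skip_special_tokens=True)[0])
```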
optimum/rbln/transformers/models/llava_next/modeling_llava_next.py

```diff
@@ -157,6 +157,12 @@ class RBLNLlavaNextForConditionalGeneration(RBLNModel):
         self._padding_side = "left"  # set it to left by default, user can use setter to change padding_sides
         return super().__post_init__(**kwargs)
 
+    def get_attn_impl(self) -> str:
+        return self.rbln_config.language_model.attn_impl
+
+    def get_kvcache_num_blocks(self) -> int:
+        return self.rbln_config.language_model.kvcache_num_blocks
+
     def get_input_embeddings(self):
         return self.language_model.get_input_embeddings()
 
```
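The two helpers added here expose compile-time settings of the language model at runtime. A small, hypothetical sketch (the local directory is a placeholder; loading an already compiled model with `from_pretrained` is the assumed path):

```python
# Hypothetical sketch: inspect compile-time attention settings of an already
# compiled LLaVA-NeXT model. "./compiled-llava-next" is a placeholder path.
from optimum.rbln import RBLNLlavaNextForConditionalGeneration

model = RBLNLlavaNextForConditionalGeneration.from_pretrained("./compiled-llava-next")
print(model.get_attn_impl())           # attention implementation chosen for the language model
print(model.get_kvcache_num_blocks())  # number of KV-cache blocks allocated at compile time
```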
optimum/rbln/transformers/models/qwen2_5_vl/__init__.py

```diff
@@ -0,0 +1,19 @@
+# Copyright 2025 Rebellions Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .configuration_qwen2_5_vl import (
+    RBLNQwen2_5_VisionTransformerPretrainedModelConfig,
+    RBLNQwen2_5_VLForConditionalGenerationConfig,
+)
+from .modeling_qwen2_5_vl import RBLNQwen2_5_VisionTransformerPretrainedModel, RBLNQwen2_5_VLForConditionalGeneration
```
optimum/rbln/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py

```diff
@@ -0,0 +1,68 @@
+# Copyright 2025 Rebellions Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List, Optional, Union
+
+from ....configuration_utils import RBLNModelConfig
+from ..decoderonly.configuration_decoderonly import RBLNDecoderOnlyModelForCausalLMConfig
+
+
+class RBLNQwen2_5_VLForConditionalGenerationConfig(RBLNDecoderOnlyModelForCausalLMConfig):
+    submodules = ["visual"]
+
+    def __init__(
+        self,
+        visual: Optional[RBLNModelConfig] = None,
+        use_inputs_embeds: bool = True,
+        **kwargs,
+    ):
+        super().__init__(use_inputs_embeds=use_inputs_embeds, **kwargs)
+        if not self.use_inputs_embeds:
+            raise ValueError(
+                "RBLNQwen2_5_VLForConditionalGenerationConfig does not allow `use_inputs_embeds` to be set to False, "
+                "as RBLNQwen2_5_VLForConditionalGeneration accepts only `inputs_embeds` as input."
+            )
+        self.visual = visual
+
+
+class RBLNQwen2_5_VisionTransformerPretrainedModelConfig(RBLNModelConfig):
+    def __init__(self, max_seq_lens: Union[int, List[int]] = None, **kwargs):
+        """
+        Args:
+            max_seq_lens (Optional[Union[int, List[int]]]): Maximum sequence lengths for Vision
+                Transformer attention. Can be an integer or list of integers, each indicating
+                the number of patches in a sequence for an image or video. For example, an image
+                of 224x196 pixels with patch size 14 and window size 112 has its width padded to
+                224, forming a 224x224 image. This yields 256 patches [(224/14) * (224/14)], so
+                `max_seq_len` must be at least 256. For window-based attention, `max_seq_len`
+                must be a multiple of `(window_size / patch_size)^2`, e.g., (112/14)^2 = 64,
+                making 256 (64 * 4) valid. RBLN optimization runs inference per image or video
+                frame, so set `max_seq_len` to match the maximum expected resolution to reduce
+                computation. If not provided, a `ValueError` is raised.
+            **kwargs: Additional arguments passed to the parent RBLNModelConfig.
+
+        Raises:
+            ValueError: If batch_size is not a positive integer.
+        """
+        super().__init__(**kwargs)
+
+        if max_seq_lens is not None:
+            if isinstance(max_seq_lens, int):
+                max_seq_lens = [max_seq_lens]
+            elif isinstance(max_seq_lens, list):
+                max_seq_lens.sort(reverse=True)
+        else:
+            raise ValueError("'max_seq_lens' must be specified.")
+
+        self.max_seq_lens = max_seq_lens
```
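The `max_seq_lens` docstring above is easiest to read as arithmetic. A short worked example follows; the config class names come from this file, while the import location and the concrete numbers are assumptions that simply restate the documented 224x196 / patch 14 / window 112 case.

```python
# Restates the docstring's example numerically; the top-level import path is an assumption.
from optimum.rbln import (
    RBLNQwen2_5_VisionTransformerPretrainedModelConfig,
    RBLNQwen2_5_VLForConditionalGenerationConfig,
)

patch_size, window_size = 14, 112
window_patches = (window_size // patch_size) ** 2   # 64: a valid max_seq_len must be a multiple of this
padded_side = 224                                   # a 224x196 image is padded up to 224x224
num_patches = (padded_side // patch_size) ** 2      # 256 patches = 64 * 4, so 256 is a valid lower bound

visual_config = RBLNQwen2_5_VisionTransformerPretrainedModelConfig(
    max_seq_lens=[256, 1024],  # stored sorted in descending order by __init__
)
config = RBLNQwen2_5_VLForConditionalGenerationConfig(visual=visual_config)
```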