optimum-rbln 0.9.2a10__py3-none-any.whl → 0.9.2rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,446 @@
+# Copyright 2025 Rebellions Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING, Optional, Union
+
+import torch
+from transformers import (
+    PretrainedConfig,
+    PreTrainedModel,
+)
+from transformers.modeling_utils import no_init_weights
+from transformers.models.colqwen2.modeling_colqwen2 import ColQwen2ForRetrievalOutput
+from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import (
+    Qwen2_5_VLModel,
+    Qwen2_5_VLRotaryEmbedding,
+)
+from transformers.models.qwen2_vl.modeling_qwen2_vl import (
+    Qwen2VLModel,
+    Qwen2VLRotaryEmbedding,
+)
+
+from optimum.rbln.transformers.models.decoderonly.modeling_decoderonly import (
+    RBLNDecoderOnlyModel,
+)
+
+from .configuration_colqwen2 import (
+    RBLNColQwen2ForRetrievalConfig,
+)
+
+
+if TYPE_CHECKING:
+    from transformers import (
+        AutoFeatureExtractor,
+        AutoProcessor,
+        AutoTokenizer,
+        PretrainedConfig,
+    )
+
+from .colqwen2_architecture import ColQwen2LanguageModelWrapper
+
+
+class RBLNColQwen2ForRetrieval(RBLNDecoderOnlyModel):
+    """
+    The ColQwen Model transformer for document retrieval using vision-language models.
+    This model inherits from [`RBLNDecoderOnlyModel`]. Check the superclass documentation for the generic methods the library implements for all its models.
+
+    A class to convert and run a pre-trained, transformers-based `ColQwen2ForRetrieval` model on RBLN devices.
+    It implements the methods to convert a pre-trained transformers `ColQwen2ForRetrieval` model into a RBLN transformer model by:
+
+    - transferring the checkpoint weights of the original into an optimized RBLN graph,
+    - compiling the resulting graph using the RBLN compiler.
+
+    **Configuration:**
+    This model uses [`RBLNColQwen2ForRetrievalConfig`] for configuration. When calling methods like `from_pretrained` or `from_model`,
+    the `rbln_config` parameter should be an instance of [`RBLNColQwen2ForRetrievalConfig`] or a dictionary conforming to its structure.
+
+    See the [`RBLNColQwen2ForRetrievalConfig`] class for all available configuration options.
+
+    Examples:
+        ```python
+        from optimum.rbln import RBLNColQwen2ForRetrieval
+
+        # Using a config dictionary
+        rbln_config = {
+            "visual": {
+                "max_seq_lens": 6400,
+            },
+            "max_seq_len": 32_768,
+            "tensor_parallel_size": 4,
+            "device": [0, 1, 2, 3],
+            "output_hidden_states": False,
+        }
+        model = RBLNColQwen2ForRetrieval.from_pretrained(
+            "vidore/colqwen2-v1.0-hf",
+            export=True,
+            rbln_config=rbln_config
+        )
+
+        # Using a RBLNColQwen2ForRetrievalConfig instance (recommended for type checking)
+        from optimum.rbln import RBLNColQwen2ForRetrievalConfig
+
+        config = RBLNColQwen2ForRetrievalConfig(
+            visual={
+                "max_seq_lens": 6400,
+                "device": 0,
+            },
+            max_seq_len=32_768,
+            tensor_parallel_size=4,
+            device=[0, 1, 2, 3],
+            output_hidden_states=False,
+        )
+        model = RBLNColQwen2ForRetrieval.from_pretrained(
+            "vidore/colqwen2-v1.0-hf",
+            export=True,
+            rbln_config=config
+        )
+        ```
+    """
+
+    main_input_name = "inputs_embeds"
+    auto_model_class = None
+    _rbln_submodules = [
+        {"name": "visual"},
+    ]
+    _decoder_wrapper_cls = ColQwen2LanguageModelWrapper
+    _use_rotary_emb = False
+
+    def __post_init__(self, **kwargs):
+        self.config = self.config.vlm_config if hasattr(self.config, "vlm_config") else self.config
+
+        artifacts = torch.load(
+            self.model_save_dir / self.subfolder / "torch_artifacts.pth",
+            weights_only=False,
+        )
+        self.embed_tokens = self._create_embedding_layer()
+        self.embed_tokens.load_state_dict(artifacts["embed_tokens"])
+        self.visual = self.rbln_submodules[0]
+        self.prefill_runtime = self.model[0]
+        self.mrope_section = self.config.text_config.rope_scaling["mrope_section"]
+        self.is_colqwen2_5 = "qwen2_5_vl" in self.config.model_type
+
+        if self.is_colqwen2_5:
+            self.rotary_emb = Qwen2_5_VLRotaryEmbedding(self.config.text_config)
+        else:
+            self.rotary_emb = Qwen2VLRotaryEmbedding(self.config.text_config)
+        self.block_tables = torch.arange(self.rbln_config.kvcache_num_blocks, dtype=torch.int16)
+
+    @classmethod
+    def _reconstruct_model_if_needed(cls, model: "PreTrainedModel"):
+        if hasattr(model, "vlm"):
+            model.visual = model.vlm.visual
+            model.language_model = model.vlm.language_model
+
+        # FIXME: temporary fix for ColQwen2ForRetrieval dtype issue
+        return model.to(torch.float32)
+
+    def _create_embedding_layer(self):
+        with no_init_weights():
+            embed_tokens = torch.nn.Embedding(
+                self.config.text_config.vocab_size,
+                self.config.text_config.hidden_size,
+                self.config.text_config.pad_token_id,
+            )
+        return embed_tokens
+
+    @classmethod
+    def get_input_info(
+        cls,
+        batch_size: int,
+        query_length: int,
+        rbln_config: RBLNColQwen2ForRetrievalConfig,
+        model_config: PretrainedConfig,
+    ):
+        text_config = model_config.text_config
+        input_info = super().get_input_info(
+            batch_size,
+            query_length,
+            rbln_config=rbln_config,
+            model_config=text_config,
+        )
+
+        pos_idx = 3
+        input_info.insert(
+            pos_idx,
+            (
+                "position_emb",
+                [
+                    2,
+                    batch_size,
+                    1,
+                    query_length,
+                    text_config.hidden_size // text_config.num_attention_heads,
+                ],
+                rbln_config.torch_dtype,
+            ),
+        )
+
+        # remove query position from input_info
+        if "query_position" in input_info:
+            query_position = input_info.pop(4)
+            assert query_position[0] == "query_position", print(query_position[0], "is deleted.")
+        return input_info
+
+    @classmethod
+    def _update_rbln_config(
+        cls,
+        preprocessors: Optional[Union["AutoFeatureExtractor", "AutoProcessor", "AutoTokenizer"]] = None,
+        model: Optional["PreTrainedModel"] = None,
+        model_config: Optional["PretrainedConfig"] = None,
+        rbln_config: Optional[RBLNColQwen2ForRetrievalConfig] = None,
+    ) -> RBLNColQwen2ForRetrievalConfig:
+        model_config = model_config.vlm_config if hasattr(model_config, "vlm_config") else model_config
+        if rbln_config.output_hidden_states is None:
+            rbln_config.output_hidden_states = getattr(model_config.text_config, "output_hidden_states", False)
+
+        return super()._update_rbln_config(
+            preprocessors=preprocessors, model=model, model_config=model_config, rbln_config=rbln_config
+        )
+
+    def _get_position_embeddings(self, hidden_states, position_ids):
+        cos, sin = self.rotary_emb(hidden_states, position_ids)
+        mrope_section = self.mrope_section * 2
+        cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(1)
+        sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(1)
+        return torch.stack([cos, sin])
+
+    def get_rope_index(self, *args, **kwargs):
+        if self.is_colqwen2_5:
+            return Qwen2_5_VLModel.get_rope_index(self, *args, **kwargs)
+        else:
+            return Qwen2VLModel.get_rope_index(self, *args, **kwargs)
+
+    def _preprocess_visual(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: torch.Tensor = None,
+        pixel_values: torch.Tensor = None,
+        pixel_values_videos: torch.FloatTensor = None,
+        image_grid_thw: torch.LongTensor = None,
+        video_grid_thw: torch.LongTensor = None,
+        second_per_grid_ts: torch.Tensor = None,
+    ):
+        batch_size = input_ids.shape[0]
+        inputs_embeds = self.embed_tokens(input_ids)
+
+        if pixel_values is not None:
+            image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
+            n_image_tokens = (input_ids == self.config.image_token_id).sum().item()
+            n_image_features = image_embeds.shape[0]
+            if n_image_tokens != n_image_features:
+                raise ValueError(
+                    f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
+                )
+
+            mask = input_ids == self.config.image_token_id
+            mask_unsqueezed = mask.unsqueeze(-1)
+            mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
+
+            image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
+            inputs_embeds = inputs_embeds.masked_scatter(mask_expanded, image_embeds)
+
+        if pixel_values_videos is not None:
+            video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)
+            n_video_tokens = (input_ids == self.config.video_token_id).sum().item()
+            n_video_features = video_embeds.shape[0]
+            if n_video_tokens != n_video_features:
+                raise ValueError(
+                    f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
+                )
+
+            mask = input_ids == self.config.video_token_id
+            mask_unsqueezed = mask.unsqueeze(-1)
+            mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
+            inputs_embeds = inputs_embeds.masked_scatter(mask_expanded, video_embeds)
+
+        max_inputs_len = input_ids.shape[1]
+        head_dim = self.config.text_config.hidden_size // self.config.text_config.num_attention_heads
+        all_position_embeds = torch.zeros(2, batch_size, 1, max_inputs_len, head_dim)
+        all_rope_deltas = []
+
+        image_token_id = self.config.image_token_id
+        video_token_id = self.config.video_token_id
+        vision_start_token_id = self.config.vision_start_token_id
+        image_idx, video_idx = 0, 0
+
+        for b_idx in range(batch_size):
+            input_id = input_ids[b_idx : b_idx + 1][:, attention_mask[b_idx].bool()]
+            vision_start_indices = torch.argwhere(input_id == vision_start_token_id).squeeze(1)
+            vision_tokens = input_id[0][vision_start_indices + 1]
+            image_nums = (vision_tokens == image_token_id).sum()
+            video_nums = (vision_tokens == video_token_id).sum()
+            args = [
+                input_id,
+                image_grid_thw[image_idx : image_idx + image_nums] if image_grid_thw is not None else None,
+                video_grid_thw[video_idx : video_idx + video_nums] if video_grid_thw is not None else None,
+            ]
+            if self.config.model_type == "qwen2_5_vl":
+                args.append(
+                    second_per_grid_ts[video_idx : video_idx + video_nums] if second_per_grid_ts is not None else None
+                )
+            position_ids, rope_deltas = self.get_rope_index(*args)
+            image_idx += image_nums
+            video_idx += video_nums
+
+            position_embed = self._get_position_embeddings(inputs_embeds, position_ids)
+            mask_indices = torch.nonzero(attention_mask[b_idx], as_tuple=True)[0]
+            all_position_embeds[:, b_idx : b_idx + 1].index_copy_(dim=-2, index=mask_indices, source=position_embed)
+            all_rope_deltas.append(rope_deltas)
+
+        rope_deltas = torch.stack(all_rope_deltas)
+
+        return inputs_embeds, all_position_embeds, rope_deltas
+
+    def _preprocess_chunked_prefill(self, inputs_embeds, attention_mask, position_embed):
+        # valid sequence length of inputs_embeds
+        query_length = inputs_embeds.shape[1] if attention_mask is None else torch.sum(attention_mask.view(-1)).item()
+
+        # extract valid inputs
+        inputs_embeds = inputs_embeds[:, attention_mask.bool()] if attention_mask is not None else inputs_embeds
+        position_embed = (
+            position_embed[:, :, :, attention_mask.bool(), :] if attention_mask is not None else position_embed
+        )
+
+        # add padding for chunked prefill
+        padding_size = (
+            self.rbln_config.prefill_chunk_size - (query_length % self.rbln_config.prefill_chunk_size)
+        ) % self.rbln_config.prefill_chunk_size
+        padded_len = query_length + padding_size
+
+        inputs_embeds = torch.nn.functional.pad(inputs_embeds, (0, 0, 0, padding_size))
+        position_embed = torch.nn.functional.pad(position_embed, (0, 0, 0, padding_size))
+        cache_position = torch.arange(padded_len, dtype=torch.int32).unsqueeze(0)
+
+        return inputs_embeds, position_embed, cache_position, query_length
+
+    def _chunked_prefill_forward(
+        self,
+        inputs_embeds: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_embed: Optional[torch.Tensor] = None,
+        output_hidden_states: Optional[bool] = False,
+    ):
+        padded_inputs_embeds, padded_position_embed, cache_position, query_length = self._preprocess_chunked_prefill(
+            inputs_embeds, attention_mask, position_embed
+        )
+
+        # Chunked prefill
+        projs = []
+        all_hidden_states = [] if output_hidden_states else None
+        for step in range(0, query_length, self.rbln_config.prefill_chunk_size):
+            # Extract the current chunk of inputs and cache positions
+            input_chunk = padded_inputs_embeds[:, step : step + self.rbln_config.prefill_chunk_size]
+            cache_pos_chunk = cache_position[:, step : step + self.rbln_config.prefill_chunk_size]
+            position_embed_chunk = padded_position_embed[:, :, :, step : step + self.rbln_config.prefill_chunk_size, :]
+
+            # Forward pass for the current chunk
+            proj = self.prefill_runtime(
+                inputs_embeds=input_chunk,
+                cache_position=cache_pos_chunk,
+                block_tables=self.block_tables,
+                position_emb=position_embed_chunk,
+            )
+
+            if output_hidden_states:
+                projs.append(proj[0])
+                all_hidden_states.append(proj[1:])
+            else:
+                projs.append(proj)
+
+        projs = torch.concat(projs, dim=-2)[:, :query_length]
+        if output_hidden_states:
+            # Concatenate chunks for each layer
+            concatenated_hidden_states = [
+                torch.concat(hs_chunks, dim=-2)[:, :query_length] for hs_chunks in list(zip(*all_hidden_states))
+            ]
+            all_hidden_states = tuple(concatenated_hidden_states)
+
+        return self._postprocess_chunked_prefill(projs, attention_mask), all_hidden_states
+
+    def _postprocess_chunked_prefill(self, projs, attention_mask):
+        # index copy for attention mask
+        if attention_mask is not None:
+            embedding = torch.full(
+                (1, attention_mask.shape[-1], projs.shape[-1]),
+                fill_value=1e-10,
+                dtype=projs.dtype,
+            )
+            mask_indices = torch.nonzero(attention_mask, as_tuple=True)[0]
+            embedding.index_copy_(dim=-2, index=mask_indices, source=projs)
+        else:
+            embedding = projs
+        return embedding
+
+    def forward(
+        self,
+        input_ids: Optional[torch.LongTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        pixel_values: Optional[torch.Tensor] = None,
+        pixel_values_videos: Optional[torch.FloatTensor] = None,
+        image_grid_thw: Optional[torch.LongTensor] = None,
+        video_grid_thw: Optional[torch.LongTensor] = None,
+        second_per_grid_ts: Optional[torch.Tensor] = None,
+        output_hidden_states: Optional[bool] = None,
+        **kwargs,
+    ) -> torch.Tensor:
+        output_hidden_states = (
+            output_hidden_states if output_hidden_states is not None else self.rbln_config.output_hidden_states
+        )
+
+        if output_hidden_states != self.rbln_config.output_hidden_states:
+            raise ValueError(
+                f"Variable output_hidden_states {output_hidden_states} is not equal to rbln_config.output_hidden_states {self.rbln_config.output_hidden_states} "
+                f"Please compile again with the correct argument."
+            )
+
+        # Handle the custom "pixel_values" input obtained with `ColQwen2Processor` through unpadding
+        if pixel_values is not None and image_grid_thw is not None:
+            offsets = image_grid_thw[:, 1] * image_grid_thw[:, 2]  # (batch_size,)
+            pixel_values = torch.cat(
+                [pixel_sequence[:offset] for pixel_sequence, offset in zip(pixel_values, offsets)],
+                dim=0,
+            )
+        # visual preprocessing
+        inputs_embeds, position_embed, _ = self._preprocess_visual(
+            input_ids,
+            attention_mask,
+            pixel_values,
+            pixel_values_videos,
+            image_grid_thw,
+            video_grid_thw,
+            second_per_grid_ts,
+        )
+        batch_size = inputs_embeds.shape[0]
+
+        projs = []
+        for b_idx in range(batch_size):
+            proj = self._chunked_prefill_forward(
+                inputs_embeds[b_idx : b_idx + 1],
+                attention_mask[b_idx] if attention_mask is not None else None,
+                position_embed[:, b_idx : b_idx + 1],
+                output_hidden_states=output_hidden_states,
+            )
+            projs.append(proj[0])
+            all_hidden_states = proj[1] if output_hidden_states else ()
+
+        # postprocess
+        projs = torch.cat(projs, dim=0)
+        projs = projs / projs.norm(dim=-1, keepdim=True)
+        projs = projs * attention_mask.unsqueeze(-1)
+
+        return ColQwen2ForRetrievalOutput(
+            embeddings=projs,
+            hidden_states=all_hidden_states,
+        )
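
The `forward` pass above returns a `ColQwen2ForRetrievalOutput` whose `embeddings` field holds L2-normalized multi-vector (per-token) embeddings, so query/document scoring happens outside the model via late interaction. A minimal usage sketch, under stated assumptions: the compiled-model directory name is hypothetical, and `ColQwen2Processor` with its `process_images`, `process_queries`, and `score_retrieval` helpers comes from transformers and is not part of this diff.

```python
# Hedged sketch of end-to-end retrieval with a compiled RBLNColQwen2ForRetrieval model.
# Assumptions: "my-colqwen2-rbln" is a hypothetical pre-compiled model directory, and the
# ColQwen2Processor helpers (process_images / process_queries / score_retrieval) are the
# transformers-side API, not something defined in this diff.
import torch
from PIL import Image
from transformers import ColQwen2Processor

from optimum.rbln import RBLNColQwen2ForRetrieval

model = RBLNColQwen2ForRetrieval.from_pretrained("my-colqwen2-rbln", export=False)
processor = ColQwen2Processor.from_pretrained("vidore/colqwen2-v1.0-hf")

pages = [Image.open("report_page_1.png"), Image.open("report_page_2.png")]
queries = ["What was the total revenue in 2024?"]

doc_inputs = processor.process_images(images=pages)
query_inputs = processor.process_queries(text=queries)

with torch.no_grad():
    doc_embeddings = model(**doc_inputs).embeddings      # (n_pages, seq_len, dim)
    query_embeddings = model(**query_inputs).embeddings  # (n_queries, seq_len, dim)

# Late-interaction (MaxSim) scoring between query tokens and page tokens.
scores = processor.score_retrieval(query_embeddings, doc_embeddings)  # (n_queries, n_pages)
print(scores.argmax(dim=-1))  # index of the best-matching page for each query
```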
@@ -175,21 +175,36 @@ class ContextRblnConfig:
         self.create_runtimes = create_runtimes
         self.activate_profiler = activate_profiler
         self.timeout = timeout
+        self._previous_context = None

     def __enter__(self):
-        self._local.device = self.device
-        self._local.device_map = self.device_map
-        self._local.create_runtimes = self.create_runtimes
-        self._local.activate_profiler = self.activate_profiler
-        self._local.timeout = self.timeout
+        self._previous_context = {
+            "device": getattr(self._local, "device", None),
+            "device_map": getattr(self._local, "device_map", None),
+            "create_runtimes": getattr(self._local, "create_runtimes", None),
+            "activate_profiler": getattr(self._local, "activate_profiler", None),
+            "timeout": getattr(self._local, "timeout", None),
+        }
+
+        if self.device is not None:
+            self._local.device = self.device
+        if self.device_map is not None:
+            self._local.device_map = self.device_map
+        if self.create_runtimes is not None:
+            self._local.create_runtimes = self.create_runtimes
+        if self.activate_profiler is not None:
+            self._local.activate_profiler = self.activate_profiler
+        if self.timeout is not None:
+            self._local.timeout = self.timeout
         return self

     def __exit__(self, exc_type, exc_val, exc_tb):
-        self._local.device = None
-        self._local.device_map = None
-        self._local.create_runtimes = None
-        self._local.activate_profiler = None
-        self._local.timeout = None
+        if self._previous_context is not None:
+            self._local.device = self._previous_context["device"]
+            self._local.device_map = self._previous_context["device_map"]
+            self._local.create_runtimes = self._previous_context["create_runtimes"]
+            self._local.activate_profiler = self._previous_context["activate_profiler"]
+            self._local.timeout = self._previous_context["timeout"]

     @classmethod
     def get_current_context(cls):
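
The hunk above changes `ContextRblnConfig` from unconditionally writing and then clearing its thread-local state to snapshotting whatever was already set, overriding only the arguments that are not `None`, and restoring the snapshot on exit. A small sketch of the behavior this implies for nested contexts, assuming the constructor accepts the keyword arguments shown in the hunk and that unspecified ones default to `None`:

```python
# Behavior sketch implied by the diff above (not a test shipped with the package).
# Import path follows the RECORD entry for optimum/rbln/utils/runtime_utils.py.
from optimum.rbln.utils.runtime_utils import ContextRblnConfig

with ContextRblnConfig(device=0, timeout=120):
    # Thread-local state: device=0, timeout=120
    with ContextRblnConfig(device=1):
        # Only `device` was passed, so it alone is overridden:
        # device=1, timeout stays 120 (inherited from the outer context).
        ...
    # On exiting the inner context its snapshot is restored: device=0, timeout=120 again.
    # Before this change, __exit__ reset every field to None, wiping the outer context.
```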
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: optimum-rbln
-Version: 0.9.2a10
+Version: 0.9.2rc1
 Summary: Optimum RBLN is the interface between the HuggingFace Transformers and Diffusers libraries and RBLN accelerators. It provides a set of tools enabling easy model loading and inference on single and multiple rbln device settings for different downstream tasks.
 Project-URL: Homepage, https://rebellions.ai
 Project-URL: Documentation, https://docs.rbln.ai
@@ -1,7 +1,8 @@
-optimum/rbln/__init__.py,sha256=AZ-7X3ZCjMNcz4mkC_98y-HWRw38ijh8gETJet9tiyM,18828
-optimum/rbln/__version__.py,sha256=Udcx8e3TDM3_lHc6m1QPrnI_qAbfRUcSl8dCg5wXfaY,714
+optimum/rbln/__init__.py,sha256=ns14slnkiDevAQCeOXQoejSnzfk3WNuie4cyYiMQZSc,18980
+optimum/rbln/__version__.py,sha256=WozqVRDHdwcyqBUlRJldKeU9qimFW1KUCZdsg_ks-Fg,714
+optimum/rbln/cli.py,sha256=944P_f9btDyFryHfHzxUKQvwXWYD1hrceDuK6SWNQcQ,22832
 optimum/rbln/configuration_utils.py,sha256=uLjMsWyYz-4SQ2wbvYqDUZcau29EjU-AghF4q1LNGxw,37260
-optimum/rbln/modeling.py,sha256=h-Iiku3l9KWF1fBpg3loG74VWU13_n7VjVdry5OC06A,16082
+optimum/rbln/modeling.py,sha256=50BE-bpn-GMImXjQGrG5rqnhofg1DHs6jyS2CzprPBY,16247
 optimum/rbln/modeling_base.py,sha256=blTZgayOh5U7zNhbrdyMuS1fq1-xd6N7y64I0lXDMU0,27589
 optimum/rbln/diffusers/__init__.py,sha256=1tgU_xWA42BmInqu9bBz_5R_E9TGhhK3mI06YlaiTLg,7232
 optimum/rbln/diffusers/modeling_diffusers.py,sha256=iybCd2KaEL5RMzRduWkHvKm90iXDcbUXsoKVfiNYDcY,20411
@@ -72,13 +73,13 @@ optimum/rbln/ops/flash_attn.py,sha256=yTCdYQVqm_1rHMHWjrMQaIR8WTuG_xA6t033x1IVvT
 optimum/rbln/ops/kv_cache_update.py,sha256=aIvK2Sp7M3EfJzJgNvIvAJv4emoN6QOhmgaWj-VboLs,1440
 optimum/rbln/ops/linear.py,sha256=5K3pcrrUHu_p8LrMIU-jX2TnafksveFjjZSCsYSp_yw,1328
 optimum/rbln/ops/sliding_window_attn.py,sha256=EQrV_yRGc5z6kvwEsAcLP028bJWkQg2UPI3xubt9skU,3487
-optimum/rbln/transformers/__init__.py,sha256=hyv53b_d_IJ9KYsDogTmKHDNuXIGNCFkrJI21RHE5ak,12573
+optimum/rbln/transformers/__init__.py,sha256=orLCZJRJYcxVGpzBvgOUWFwqsxFXyvYf31LZmTs8T7g,12725
 optimum/rbln/transformers/configuration_generic.py,sha256=rM4XY1a_UlRf3ZCZkCav59JKRuvqiEEUUgnqNlgdcv8,5207
 optimum/rbln/transformers/modeling_attention_utils.py,sha256=aLyOaq4me1m-JMmnKbuyNQageDxNU2jjEhGE_ew2P5o,11465
 optimum/rbln/transformers/modeling_generic.py,sha256=tBbn0rPiJjmyjVXZUY-bIEgfKThFLgTOCRIE-E7R_vM,14214
 optimum/rbln/transformers/modeling_outputs.py,sha256=cd8ZlhHAGq7S6i5-QK6TJCxgORvoPMnZpqPBlUc_pMY,1177
 optimum/rbln/transformers/modeling_rope_utils.py,sha256=6Zg3r-TeUk4WQAlr95pqfhuoAD_RQ4njT1rbO9uPL0Q,14379
-optimum/rbln/transformers/models/__init__.py,sha256=yzcjyHCHH4-Mi26N34HzNs7Tl5HjjT1rrwQ8f_W2_nc,13532
+optimum/rbln/transformers/models/__init__.py,sha256=NEDsbJgzO-0pM_B0zniHPnDxYrRIh_pBMnFefkzP5JA,13718
 optimum/rbln/transformers/models/audio_spectrogram_transformer/__init__.py,sha256=I2vL4lrzbT5p4eJcH-EKHzEfcPkj_XVsie7jb9q6yic,775
 optimum/rbln/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py,sha256=z7LJiVJPmnlCM3mcyhPJP8AufSrxO_dsPeJ51onq-Nc,833
 optimum/rbln/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py,sha256=FIKEVWpIt6-JQX9B_rAfCrAPqdUHtR2i8D_X2k7639E,1498
@@ -101,8 +102,12 @@ optimum/rbln/transformers/models/clip/configuration_clip.py,sha256=Ea8TCVmMayydf
 optimum/rbln/transformers/models/clip/modeling_clip.py,sha256=BLAYJAtv_2ZnKOlZ8iDBr2Su3bKM_eMWeUSK9MOaj7I,13198
 optimum/rbln/transformers/models/colpali/__init__.py,sha256=n3rueXT_oC0N8myoZiic0YkVK24CW5hZBPa-0L8so6Y,119
 optimum/rbln/transformers/models/colpali/colpali_architecture.py,sha256=TCOW3v5l9fIt1uIFtWa8ZAxq1cdCER8gXWjmbLQD20M,8079
-optimum/rbln/transformers/models/colpali/configuration_colpali.py,sha256=_HuZBVV-ponml95UapkYpRhffZy53-9jSZknx7hID7o,3348
+optimum/rbln/transformers/models/colpali/configuration_colpali.py,sha256=qjaUC7S9kCZBWL9LsXnEo0woxsksPSHJpqA3TRTx6KE,3408
 optimum/rbln/transformers/models/colpali/modeling_colpali.py,sha256=2lHxvtrK3x2GOv7r-5nZelmjezm3ehe6Qf28cMdNmoQ,17961
+optimum/rbln/transformers/models/colqwen2/__init__.py,sha256=gEKc5X4uGME4XKySDD1H6JlT89jaMvZ00HqbDVXNHU8,123
+optimum/rbln/transformers/models/colqwen2/colqwen2_architecture.py,sha256=spIH6d-09asUBSqhuJN9NAK2Ke7Kv1RP7HdwMOcxf_s,8732
+optimum/rbln/transformers/models/colqwen2/configuration_colqwen2.py,sha256=_HYOLR2O8xjEJvXn7LRU_BSxdysMXmJ7oEhCLhaG2z0,2649
+optimum/rbln/transformers/models/colqwen2/modeling_colqwen2.py,sha256=Iy5wa3Aa-Vfjv4FTyDvL-KtyGAB9nBuGCPXz_Alv_l0,18598
 optimum/rbln/transformers/models/decoderonly/__init__.py,sha256=pKBXAtE3y_6nnwYfQJjdPmWqUwxuJ0lr8rrqkgyH07M,1126
 optimum/rbln/transformers/models/decoderonly/configuration_decoderonly.py,sha256=GX-IwTe6ywM9hmyquIu66y0YgIVZS5JNIz8LKAb4Ow8,17003
 optimum/rbln/transformers/models/decoderonly/configuration_lora.py,sha256=5DuTs2vy7jF7MLy161QD_KvCTaNW-5Mok7hBH0yK44U,17356
@@ -242,10 +247,11 @@ optimum/rbln/utils/hub.py,sha256=FPBGslHJAMeyfBID3viLmh51xJzcR29xWtYtMN8y2CI,276
 optimum/rbln/utils/import_utils.py,sha256=fpOERIIxXm-cDYGn1NN6c7aWDPQYVitPQW2MiyZ9NEY,5471
 optimum/rbln/utils/logging.py,sha256=VKKBmlQSdg6iZCGmAXaWYiW67K84jyp1QJhLQSSjPPE,3453
 optimum/rbln/utils/model_utils.py,sha256=4k5879Kh75m3x_vS4-qOGfqsOiAvc2kdNFFfvsFvz3k,1748
-optimum/rbln/utils/runtime_utils.py,sha256=wVIYE4KS7RNVc1y-5X41SmNdLz_Gpk7zlguKfujRbYo,7649
+optimum/rbln/utils/runtime_utils.py,sha256=Sf0YOUeJkhByArEgqofb_THvFBYdMVIgF_MGvhL4i-w,8540
 optimum/rbln/utils/save_utils.py,sha256=hG5uOtYmecSXZuGTvCXsTM-SiyZpr5q3InUGCCq_jzQ,3619
 optimum/rbln/utils/submodule.py,sha256=SKLnM3KsX8_rv3HauO4oB2-JSjzuadQjRwo_BhMUzLI,6362
-optimum_rbln-0.9.2a10.dist-info/METADATA,sha256=YgUhyYWjwgxolBn7Qemf4UBCLHEatX-dOGCSCqS8g5s,5351
-optimum_rbln-0.9.2a10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-optimum_rbln-0.9.2a10.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
-optimum_rbln-0.9.2a10.dist-info/RECORD,,
+optimum_rbln-0.9.2rc1.dist-info/METADATA,sha256=yxmUuYsEcOT081Qt2J0S_07MHZ4V-QnYPILasp45SiU,5351
+optimum_rbln-0.9.2rc1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+optimum_rbln-0.9.2rc1.dist-info/entry_points.txt,sha256=-orKDGKfLypxlPlTz8-ZkmdKULNvax9yeCCCn-q89n4,59
+optimum_rbln-0.9.2rc1.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+optimum_rbln-0.9.2rc1.dist-info/RECORD,,
@@ -0,0 +1,2 @@
+[console_scripts]
+optimum-rbln-cli = optimum.rbln.cli:main
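
The new `entry_points.txt` registers a `console_scripts` entry, so installing this wheel exposes an `optimum-rbln-cli` command that dispatches to `main()` in the `optimum/rbln/cli.py` module added in the RECORD diff above; the CLI's actual arguments are not shown in this diff.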