optimum-rbln 0.8.3a3__py3-none-any.whl → 0.8.3rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of optimum-rbln might be problematic. Click here for more details.

Files changed (29) hide show
  1. optimum/rbln/__init__.py +18 -0
  2. optimum/rbln/__version__.py +2 -2
  3. optimum/rbln/configuration_utils.py +16 -0
  4. optimum/rbln/diffusers/pipelines/auto_pipeline.py +1 -1
  5. optimum/rbln/modeling.py +1 -0
  6. optimum/rbln/modeling_base.py +24 -7
  7. optimum/rbln/transformers/__init__.py +18 -0
  8. optimum/rbln/transformers/configuration_generic.py +2 -0
  9. optimum/rbln/transformers/modeling_generic.py +12 -4
  10. optimum/rbln/transformers/models/__init__.py +23 -0
  11. optimum/rbln/transformers/models/auto/__init__.py +1 -0
  12. optimum/rbln/transformers/models/auto/modeling_auto.py +7 -0
  13. optimum/rbln/transformers/models/bert/bert_architecture.py +16 -0
  14. optimum/rbln/transformers/models/bert/modeling_bert.py +8 -4
  15. optimum/rbln/transformers/models/blip_2/modeling_blip_2.py +6 -1
  16. optimum/rbln/transformers/models/depth_anything/modeling_depth_anything.py +1 -1
  17. optimum/rbln/transformers/models/gemma3/modeling_gemma3.py +6 -1
  18. optimum/rbln/transformers/models/grounding_dino/__init__.py +10 -0
  19. optimum/rbln/transformers/models/grounding_dino/configuration_grounding_dino.py +86 -0
  20. optimum/rbln/transformers/models/grounding_dino/grounding_dino_architecture.py +507 -0
  21. optimum/rbln/transformers/models/grounding_dino/modeling_grounding_dino.py +1032 -0
  22. optimum/rbln/transformers/models/swin/__init__.py +16 -0
  23. optimum/rbln/transformers/models/swin/configuration_swin.py +42 -0
  24. optimum/rbln/transformers/models/swin/modeling_swin.py +341 -0
  25. optimum/rbln/utils/submodule.py +10 -4
  26. {optimum_rbln-0.8.3a3.dist-info → optimum_rbln-0.8.3rc0.dist-info}/METADATA +1 -1
  27. {optimum_rbln-0.8.3a3.dist-info → optimum_rbln-0.8.3rc0.dist-info}/RECORD +29 -21
  28. {optimum_rbln-0.8.3a3.dist-info → optimum_rbln-0.8.3rc0.dist-info}/WHEEL +0 -0
  29. {optimum_rbln-0.8.3a3.dist-info → optimum_rbln-0.8.3rc0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,16 @@
1
+ # Copyright 2025 Rebellions Inc. All rights reserved.
2
+
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at:
6
+
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .configuration_swin import RBLNSwinBackboneConfig
16
+ from .modeling_swin import RBLNSwinBackbone
@@ -0,0 +1,42 @@
1
+ # Licensed under the Apache License, Version 2.0 (the "License");
2
+ # you may not use this file except in compliance with the License.
3
+ # You may obtain a copy of the License at:
4
+
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ # Unless required by applicable law or agreed to in writing, software
8
+ # distributed under the License is distributed on an "AS IS" BASIS,
9
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10
+ # See the License for the specific language governing permissions and
11
+ # limitations under the License.
12
+
13
+ from typing import Any, Optional, Tuple, Union
14
+
15
+ from ...configuration_generic import RBLNModelForImageClassificationConfig
16
+
17
+
18
class RBLNSwinBackboneConfig(RBLNModelForImageClassificationConfig):
    """Compile-time configuration for the RBLN Swin Transformer backbone."""

    def __init__(
        self,
        image_size: Optional[Union[int, Tuple[int, int]]] = None,
        batch_size: Optional[int] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        **kwargs: Any,
    ):
        """
        Args:
            image_size (Optional[Union[int, Tuple[int, int]]]): Input image size fixed at
                compile time, either a single edge length or a (height, width) pair.
                Defaults to None; resolved later from the preprocessor when available.
            batch_size (Optional[int]): The batch size for image processing. Defaults to 1.
            output_hidden_states (Optional[bool]): Whether the compiled model returns the
                hidden states of every stage. Defaults to None.
            output_attentions (Optional[bool]): Whether the compiled model returns attention
                weights. Defaults to None.
            **kwargs: Additional arguments passed to the parent RBLNModelConfig.

        Raises:
            ValueError: If batch_size is not a positive integer.
        """
        super().__init__(**kwargs)
        self.batch_size = batch_size or 1
        # `<= 0` (rather than `< 0`) so the check actually enforces the
        # "positive integer" contract stated in the error message.
        if not isinstance(self.batch_size, int) or self.batch_size <= 0:
            raise ValueError(f"batch_size must be a positive integer, got {self.batch_size}")

        self.image_size = image_size
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
@@ -0,0 +1,341 @@
1
+ # Copyright 2025 Rebellions Inc. All rights reserved.
2
+
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at:
6
+
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import types
16
+ from typing import TYPE_CHECKING, Optional, Tuple, Union
17
+
18
+ import torch
19
+ import torch.nn.functional as F
20
+ from transformers import SwinConfig
21
+ from transformers.models.swin.modeling_swin import BackboneOutput
22
+
23
+ from ....configuration_utils import RBLNCompileConfig, RBLNModelConfig
24
+ from ....modeling import RBLNModel
25
+ from ....utils.logging import get_logger
26
+ from .configuration_swin import RBLNSwinBackboneConfig
27
+
28
+
29
+ logger = get_logger(__name__)
30
+
31
+ if TYPE_CHECKING:
32
+ from transformers import (
33
+ AutoFeatureExtractor,
34
+ AutoProcessor,
35
+ AutoTokenizer,
36
+ PreTrainedModel,
37
+ SwinBackbone,
38
+ SwinEncoder,
39
+ )
40
+
41
+
42
def window_partition(input_feature, window_size):
    """Split a (B, H, W, C) feature map into non-overlapping square windows.

    Returns a tensor of shape (num_windows * B, window_size, window_size, C),
    with windows ordered row-major within each batch element.
    """
    n, h, w, c = input_feature.shape
    grid = input_feature.view(n, h // window_size, window_size, w // window_size, window_size, c)
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(-1, window_size, window_size, c)
52
+
53
+
54
def get_attn_mask(self, height, width, dtype, device):
    """Build the attention mask for shifted-window attention (SW-MSA).

    Returns None when no cyclic shift is applied (shift_size == 0); otherwise a
    per-window mask with -100.0 between positions from different shift regions.
    """
    if self.shift_size <= 0:
        return None

    # Tag each (height, width) region created by the cyclic shift with a
    # distinct id, then forbid attention between differently-tagged positions.
    img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device)
    segments = (
        slice(0, -self.window_size),
        slice(-self.window_size, -self.shift_size),
        slice(-self.shift_size, None),
    )
    region_id = torch.zeros(1)
    for h_seg in segments:
        for w_seg in segments:
            img_mask[:, h_seg, w_seg, :] = region_id
            region_id += 1

    windows = window_partition(img_mask, self.window_size)
    windows = windows.view(-1, self.window_size * self.window_size)
    attn_mask = windows.unsqueeze(1) - windows.unsqueeze(2)
    # Non-zero differences mean "different region" -> large negative bias.
    return attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
81
+
82
+
83
+ class _SwinEncoder(torch.nn.Module):
84
+ def __init__(self, model: "SwinEncoder"):
85
+ super().__init__()
86
+ self.layers = model.layers
87
+
88
+ def forward(
89
+ self,
90
+ hidden_states: torch.Tensor,
91
+ input_dimensions: Tuple[int, int],
92
+ head_mask: Optional[torch.FloatTensor] = None,
93
+ output_attentions: Optional[bool] = False,
94
+ output_hidden_states: Optional[bool] = False,
95
+ output_hidden_states_before_downsampling: Optional[bool] = False,
96
+ always_partition: Optional[bool] = False,
97
+ return_dict: Optional[bool] = True,
98
+ ):
99
+ all_hidden_states = () if output_hidden_states else None
100
+ all_reshaped_hidden_states = () if output_hidden_states else None
101
+ all_self_attentions = () if output_attentions else None
102
+
103
+ if output_hidden_states:
104
+ batch_size, _, hidden_size = hidden_states.shape
105
+ # rearrange b (h w) c -> b c h w
106
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
107
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
108
+ all_hidden_states += (hidden_states,)
109
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
110
+
111
+ for i, layer_module in enumerate(self.layers):
112
+ layer_head_mask = head_mask[i] if head_mask is not None else None
113
+
114
+ layer_outputs = layer_module(
115
+ hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
116
+ )
117
+
118
+ hidden_states = layer_outputs[0]
119
+ hidden_states_before_downsampling = layer_outputs[1]
120
+ output_dimensions = layer_outputs[2]
121
+
122
+ input_dimensions = (output_dimensions[-2], output_dimensions[-1])
123
+
124
+ if output_hidden_states and output_hidden_states_before_downsampling:
125
+ batch_size, _, hidden_size = hidden_states_before_downsampling.shape
126
+ # rearrange b (h w) c -> b c h w
127
+ # here we use the original (not downsampled) height and width
128
+ reshaped_hidden_state = hidden_states_before_downsampling.view(
129
+ batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
130
+ )
131
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
132
+ all_hidden_states += (hidden_states_before_downsampling,)
133
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
134
+ elif output_hidden_states and not output_hidden_states_before_downsampling:
135
+ batch_size, _, hidden_size = hidden_states.shape
136
+ # rearrange b (h w) c -> b c h w
137
+ reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
138
+ reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
139
+ all_hidden_states += (hidden_states,)
140
+ all_reshaped_hidden_states += (reshaped_hidden_state,)
141
+
142
+ if output_attentions:
143
+ all_self_attentions += layer_outputs[3:]
144
+
145
+ return tuple(
146
+ v
147
+ for v in [hidden_states, all_hidden_states, all_self_attentions, all_reshaped_hidden_states]
148
+ if v is not None
149
+ )
150
+
151
+
152
class _SwinBackbone(torch.nn.Module):
    """Trace-friendly wrapper around SwinBackbone that returns a flat tuple.

    Output layout: (feature_maps,), optionally followed by the per-stage
    hidden states and the attention tuples, depending on the flags given
    at construction time.
    """

    def __init__(self, model: "SwinBackbone", output_hidden_states: bool, output_attentions: bool):
        super().__init__()
        self.model = model
        self.embeddings = model.embeddings
        self.encoder = model.encoder
        self.stage_names = model.stage_names
        self.out_features = model.out_features
        self.hidden_states_norms = model.hidden_states_norms
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions

    def forward(
        self,
        pixel_values: torch.Tensor,
    ):
        embedding_output, input_dimensions = self.embeddings(pixel_values)
        encoder_outputs = _SwinEncoder(self.encoder)(
            embedding_output,
            input_dimensions,
            head_mask=None,
            output_attentions=self.output_attentions,
            output_hidden_states=True,
            output_hidden_states_before_downsampling=True,
            always_partition=True,
            return_dict=False,
        )

        # The last element holds the reshaped (B, C, H, W) per-stage states.
        stage_states = encoder_outputs[-1]

        feature_maps = ()
        for stage_name, stage_state in zip(self.stage_names, stage_states):
            if stage_name not in self.out_features:
                continue
            # Normalize in (B, H*W, C) layout, then restore (B, C, H, W).
            n, channels, h, w = stage_state.shape
            flat = stage_state.permute(0, 2, 3, 1).contiguous()
            flat = flat.view(n, h * w, channels)
            normed = self.hidden_states_norms[stage_name](flat)
            restored = normed.view(n, h, w, channels)
            restored = restored.permute(0, 3, 1, 2).contiguous()
            feature_maps += (restored,)

        outputs = (feature_maps,)

        if self.output_hidden_states:
            outputs += (encoder_outputs[1],)

        if self.output_attentions:
            outputs += (encoder_outputs[2],)

        return outputs
202
+
203
+
204
class RBLNSwinBackbone(RBLNModel):
    """RBLN-compiled Swin Transformer backbone producing multi-stage feature maps."""

    @classmethod
    def wrap_model_if_needed(cls, model: torch.nn.Module, rbln_config: RBLNSwinBackboneConfig) -> torch.nn.Module:
        # Replace each block's get_attn_mask with the module-level tracing-safe
        # variant before compilation.
        for layer in model.encoder.layers:
            for block in layer.blocks:
                block.get_attn_mask = types.MethodType(get_attn_mask, block)

        wrapper_cfg = {
            "output_hidden_states": rbln_config.output_hidden_states,
            "output_attentions": rbln_config.output_attentions,
        }
        return _SwinBackbone(model, **wrapper_cfg).eval()

    @classmethod
    def _update_submodule_config(
        cls,
        model: "PreTrainedModel",
        rbln_config: RBLNModelConfig,
        preprocessors: Optional[Union["AutoFeatureExtractor", "AutoProcessor", "AutoTokenizer"]],
    ):
        """Fill rbln_config.image_size from the first preprocessor exposing an image size.

        Checks "height"/"width" first, then "longest_edge" (when paired with
        "shortest_edge"), then "shortest_edge" alone. Stops at the first
        preprocessor that has an `image_processor` attribute.
        """
        for processor in preprocessors:
            if rbln_config.image_size is None and hasattr(processor, "image_processor"):
                if "height" in processor.image_processor.size and "width" in processor.image_processor.size:
                    rbln_config.image_size = (
                        processor.image_processor.size["height"],
                        processor.image_processor.size["width"],
                    )
                elif (
                    "longest_edge" in processor.image_processor.size
                    and "shortest_edge" in processor.image_processor.size
                ):
                    rbln_config.image_size = processor.image_processor.size["longest_edge"]
                elif "shortest_edge" in processor.image_processor.size:
                    rbln_config.image_size = processor.image_processor.size["shortest_edge"]
                break

        return rbln_config

    @classmethod
    def _update_rbln_config(
        cls,
        preprocessors: Union["AutoFeatureExtractor", "AutoProcessor", "AutoTokenizer"],
        model: Optional["PreTrainedModel"] = None,
        model_config: "SwinConfig" = None,
        rbln_config: Optional[RBLNSwinBackboneConfig] = None,
    ) -> RBLNSwinBackboneConfig:
        """Resolve the static input shape and register the compile configuration.

        Raises:
            AttributeError/ValueError downstream if no image size can be
            resolved from either rbln_config or the preprocessors.
        """
        if rbln_config.image_size is None:
            for processor in preprocessors:
                if hasattr(processor, "size"):
                    if all(required_key in processor.size.keys() for required_key in ["height", "width"]):
                        rbln_config.image_size = (processor.size["height"], processor.size["width"])
                    break

        # NOTE(review): image_height/image_width are presumably derived from
        # image_size on the config base class — confirm against RBLNModelConfig.
        input_info = [
            (
                "pixel_values",
                [
                    rbln_config.batch_size,
                    3,  # assumes 3-channel (RGB) input — TODO confirm
                    rbln_config.image_height,
                    rbln_config.image_width,
                ],
                "float32",
            ),
        ]

        rbln_config.set_compile_cfgs([RBLNCompileConfig(input_info=input_info)])
        return rbln_config

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
        output_attentions: bool = None,
        output_hidden_states: bool = None,
        **kwargs,
    ) -> Union[Tuple, BackboneOutput]:
        """Run the compiled backbone on (possibly smaller) input images.

        Inputs smaller than the compiled size are zero-padded on the right and
        bottom; larger inputs are rejected. The output flags must match the
        flags the model was compiled with.
        """
        if len(kwargs) > 0 and any(value is not None for value in kwargs.values()):
            logger.warning(
                f"Currently, optimum-rbln does not support kwargs {kwargs.keys()} for {self.__class__.__name__}."
            )

        output_attentions = output_attentions if output_attentions is not None else self.rbln_config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.rbln_config.output_hidden_states
        )

        # The compiled graph's outputs are fixed at compile time, so runtime
        # flags may not diverge from the compile-time configuration.
        if output_attentions != self.rbln_config.output_attentions:
            raise ValueError(
                f"Variable output_attentions {output_attentions} is not equal to rbln_config.output_attentions {self.rbln_config.output_attentions} "
                f"Please compile again with the correct argument."
            )

        if output_hidden_states != self.rbln_config.output_hidden_states:
            raise ValueError(
                f"Variable output_hidden_states {output_hidden_states} is not equal to rbln_config.output_hidden_states {self.rbln_config.output_hidden_states} "
                f"Please compile again with the correct argument."
            )

        _, _, original_h, original_w = pixel_values.shape
        if original_h > self.rbln_config.image_height or original_w > self.rbln_config.image_width:
            raise ValueError(
                f"Input image size ({original_h}x{original_w}) exceeds the configured maximum size"
                f" ({self.rbln_config.image_height}x{self.rbln_config.image_width})."
            )

        # Zero-pad right/bottom up to the compiled static shape.
        pad_h = self.rbln_config.image_height - original_h
        pad_w = self.rbln_config.image_width - original_w
        padded_pixel_values = F.pad(pixel_values, (0, pad_w, 0, pad_h))

        # self.model[0] is the compiled runtime; it returns the flat sequence
        # produced by _SwinBackbone.forward, consumed positionally below.
        output = self.model[0](padded_pixel_values)

        feature_maps = ()
        for i in range(len(self.config.out_features)):
            feature_maps += (output.pop(0),)

        if self.rbln_config.output_hidden_states:
            hidden_states = ()
            for i in range(len(self.config.stage_names)):
                hidden_states += (output.pop(0),)
        else:
            hidden_states = None

        if self.rbln_config.output_attentions:
            attentions = ()
            # NOTE(review): one attention tensor per entry in config.depths —
            # verify this matches the compiled wrapper's output count.
            for i in range(len(self.config.depths)):
                attentions += (output.pop(0),)
        else:
            attentions = None

        if not return_dict:
            return tuple(item for item in (feature_maps, hidden_states, attentions) if item is not None)
        else:
            return BackboneOutput(
                feature_maps=feature_maps,
                hidden_states=hidden_states,
                attentions=attentions,
            )
@@ -13,7 +13,7 @@
13
13
  # limitations under the License.
14
14
 
15
15
  from pathlib import Path
16
- from typing import TYPE_CHECKING, Any, Dict, List, Type
16
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
17
17
 
18
18
  from transformers import PretrainedConfig
19
19
 
@@ -22,7 +22,7 @@ from ..utils.model_utils import get_rbln_model_cls
22
22
 
23
23
 
24
24
  if TYPE_CHECKING:
25
- from transformers import PreTrainedModel
25
+ from transformers import AutoFeatureExtractor, AutoProcessor, AutoTokenizer, PreTrainedModel
26
26
 
27
27
  from ..modeling import RBLNModel
28
28
 
@@ -42,7 +42,12 @@ class SubModulesMixin:
42
42
  setattr(self, submodule_meta["name"], submodule)
43
43
 
44
44
  @classmethod
45
- def _update_submodule_config(cls, model: "PreTrainedModel", rbln_config: RBLNModelConfig):
45
+ def _update_submodule_config(
46
+ cls,
47
+ model: "PreTrainedModel",
48
+ rbln_config: RBLNModelConfig,
49
+ preprocessors: Optional[Union["AutoFeatureExtractor", "AutoProcessor", "AutoTokenizer"]],
50
+ ):
46
51
  return rbln_config
47
52
 
48
53
  @classmethod
@@ -51,6 +56,7 @@ class SubModulesMixin:
51
56
  ) -> List["RBLNModel"]:
52
57
  rbln_submodules = []
53
58
  submodule_prefix = getattr(cls, "_rbln_submodule_prefix", None)
59
+ preprocessors = kwargs.pop("preprocessors", [])
54
60
 
55
61
  for submodule in cls._rbln_submodules:
56
62
  submodule_name = submodule["name"]
@@ -69,7 +75,7 @@ class SubModulesMixin:
69
75
  submodule_rbln_config = submodule_rbln_config_class(**submodule_rbln_config)
70
76
  setattr(rbln_config, submodule_name, submodule_rbln_config)
71
77
 
72
- submodule_rbln_config = submodule_cls._update_submodule_config(model, submodule_rbln_config)
78
+ submodule_rbln_config = submodule_cls._update_submodule_config(model, submodule_rbln_config, preprocessors)
73
79
 
74
80
  rbln_submodule = submodule_cls.from_model(
75
81
  model=torch_submodule,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: optimum-rbln
3
- Version: 0.8.3a3
3
+ Version: 0.8.3rc0
4
4
  Summary: Optimum RBLN is the interface between the HuggingFace Transformers and Diffusers libraries and RBLN accelerators. It provides a set of tools enabling easy model loading and inference on single and multiple rbln device settings for different downstream tasks.
5
5
  Project-URL: Homepage, https://rebellions.ai
6
6
  Project-URL: Documentation, https://docs.rbln.ai
@@ -1,8 +1,8 @@
1
- optimum/rbln/__init__.py,sha256=uudbkt6M2dI7hyBqnKI7Rw_DcOo6Dye4VdEgHw0Kj2w,17578
2
- optimum/rbln/__version__.py,sha256=NC494o_z1A8m7nK7GwomeE3oY1ddXSWewbtXIl6IJmQ,712
3
- optimum/rbln/configuration_utils.py,sha256=xneqnRWSUVROqpzbTrBACex42-L9zwo3eSjfHjFuhv4,33072
4
- optimum/rbln/modeling.py,sha256=0CMQnpVvW9evNrTFHM2XFbNpRY1HkbFzYJ5sRyYFq0o,14293
5
- optimum/rbln/modeling_base.py,sha256=gHfqIO6lKT8smkUthUuRHnbITpxHpnDeBPT8iTeasCk,24575
1
+ optimum/rbln/__init__.py,sha256=32ouGKDGus9k5_kD27CxP8jIQOw66zpDTfS0xs1XlfE,18298
2
+ optimum/rbln/__version__.py,sha256=boIaJ8T6HCT9Qh8wBU3n-6ZyjtAKYgztQh0WMaN7BxM,714
3
+ optimum/rbln/configuration_utils.py,sha256=fE3HlZblxukKSdS-4VofjuyCAiqwPMX8bqXpOiTZp4g,33926
4
+ optimum/rbln/modeling.py,sha256=jMiJy9PGjZpXpAmRTFD5fTuj8xEbLCUmncIxGD6XWLk,14338
5
+ optimum/rbln/modeling_base.py,sha256=txBab-zVXcjqnF2gZJBzhrp5ruA3vwt3hjls0Q2S_0w,25492
6
6
  optimum/rbln/diffusers/__init__.py,sha256=1tgU_xWA42BmInqu9bBz_5R_E9TGhhK3mI06YlaiTLg,7232
7
7
  optimum/rbln/diffusers/modeling_diffusers.py,sha256=TAuMb7PSMjNwK7mh5ItE_CtAEgYeZKI27XkFFmxjHlQ,19902
8
8
  optimum/rbln/diffusers/configurations/__init__.py,sha256=vMRnPY4s-Uju43xP038D2EA18X_mhy2YfsZVpSU-VoA,1322
@@ -36,7 +36,7 @@ optimum/rbln/diffusers/models/transformers/transformer_sd3.py,sha256=yF7sS0Qvawo
36
36
  optimum/rbln/diffusers/models/unets/__init__.py,sha256=MaICuK9CWjgzejXy8y2NDrphuEq1rkzanF8u45k6O5I,655
37
37
  optimum/rbln/diffusers/models/unets/unet_2d_condition.py,sha256=v3WS9EGKROE_QClXrxC7rmRko1BspAvAbeIfh83LK88,15832
38
38
  optimum/rbln/diffusers/pipelines/__init__.py,sha256=r8mu21102cKXdkG1II9tpfpUS6wuyren2oK9y_MptZY,3703
39
- optimum/rbln/diffusers/pipelines/auto_pipeline.py,sha256=oGZWXfj82w695D2NiYUitgoWiwP2Z4PlgA3q6hoOKww,9502
39
+ optimum/rbln/diffusers/pipelines/auto_pipeline.py,sha256=zFDXbO9Iv0LO7maefV82dmi5Ta6L9oZxY09QFVX6F_Q,9511
40
40
  optimum/rbln/diffusers/pipelines/controlnet/__init__.py,sha256=n1Ef22TSeax-kENi_d8K6wGGHSNEo9QkUeygELHgcao,983
41
41
  optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py,sha256=3S9dogIHW8Bqg5kIlCudhCQG-4g3FcdOPEWhBOf7CJA,4059
42
42
  optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py,sha256=G96bh4D9Cu-w4F9gZBQF6wNzhJQv9kvI34ZFsuEDjSw,35714
@@ -72,29 +72,30 @@ optimum/rbln/ops/flash_attn.py,sha256=yTCdYQVqm_1rHMHWjrMQaIR8WTuG_xA6t033x1IVvT
72
72
  optimum/rbln/ops/kv_cache_update.py,sha256=aIvK2Sp7M3EfJzJgNvIvAJv4emoN6QOhmgaWj-VboLs,1440
73
73
  optimum/rbln/ops/linear.py,sha256=5K3pcrrUHu_p8LrMIU-jX2TnafksveFjjZSCsYSp_yw,1328
74
74
  optimum/rbln/ops/sliding_window_attn.py,sha256=EQrV_yRGc5z6kvwEsAcLP028bJWkQg2UPI3xubt9skU,3487
75
- optimum/rbln/transformers/__init__.py,sha256=2Xe086vpwskJzN9bujkuRkBV96Ov54kYmAB7gPjOh-U,11323
76
- optimum/rbln/transformers/configuration_generic.py,sha256=95ks6REJYuzI1zLwGlPSlxVV45saVcYOob6ihn-WAAY,5092
75
+ optimum/rbln/transformers/__init__.py,sha256=6s-VhsqwptqwUuq7vb847bJlfFgBGshOoK3vaN9i_lI,12043
76
+ optimum/rbln/transformers/configuration_generic.py,sha256=jrehv1oONOS-iBTY5gj2TKUfWjDTnukNJt6cZfNMylU,5213
77
77
  optimum/rbln/transformers/modeling_attention_utils.py,sha256=aLyOaq4me1m-JMmnKbuyNQageDxNU2jjEhGE_ew2P5o,11465
78
- optimum/rbln/transformers/modeling_generic.py,sha256=2BtroigKuu7z7C98dpLwI875R0EoHN-ceHEVbyPQuYk,12212
78
+ optimum/rbln/transformers/modeling_generic.py,sha256=82Wi2K6zAp5tjef05lzYIEqbK93h0_OkPDbElB-VMMs,12568
79
79
  optimum/rbln/transformers/modeling_outputs.py,sha256=cd8ZlhHAGq7S6i5-QK6TJCxgORvoPMnZpqPBlUc_pMY,1177
80
80
  optimum/rbln/transformers/modeling_rope_utils.py,sha256=6Zg3r-TeUk4WQAlr95pqfhuoAD_RQ4njT1rbO9uPL0Q,14379
81
- optimum/rbln/transformers/models/__init__.py,sha256=IYF1F4TZDfTBlfT3howesi_lNlQe-IUuJFsa4KXmWw4,12126
81
+ optimum/rbln/transformers/models/__init__.py,sha256=V36KWN0fTL0MvfDduUfjIiwXvWmwDKm43G-g5Y773-I,12943
82
82
  optimum/rbln/transformers/models/audio_spectrogram_transformer/__init__.py,sha256=I2vL4lrzbT5p4eJcH-EKHzEfcPkj_XVsie7jb9q6yic,775
83
83
  optimum/rbln/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py,sha256=z7LJiVJPmnlCM3mcyhPJP8AufSrxO_dsPeJ51onq-Nc,833
84
84
  optimum/rbln/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py,sha256=FIKEVWpIt6-JQX9B_rAfCrAPqdUHtR2i8D_X2k7639E,1498
85
- optimum/rbln/transformers/models/auto/__init__.py,sha256=cZaOoQBTfdT5qhcfsPbagDvDQ0Vvr6Oej5y0jE9AFbc,1105
85
+ optimum/rbln/transformers/models/auto/__init__.py,sha256=tdYqXkg9xBGNr4fZjH7_O3qRVbHvpEVjrJ6wtNUMMJM,1150
86
86
  optimum/rbln/transformers/models/auto/auto_factory.py,sha256=1CA52xV2dS1Uzumcgqe4zobdpoi-Xt2oNjP3uLFtm08,8020
87
- optimum/rbln/transformers/models/auto/modeling_auto.py,sha256=AbKqxcUxKtuX6Y2T7CdmrQn8I0r5DDkUzLKBnyor-GQ,4470
87
+ optimum/rbln/transformers/models/auto/modeling_auto.py,sha256=SMsWnD8f7VhKmh7h_S2voksEWlNccfF4fQ7AmwLYq6U,4790
88
88
  optimum/rbln/transformers/models/bart/__init__.py,sha256=fVo-gZEmJ0yxkIxEX6ciuRAGgXNyuvaXE2s88bhbjAE,830
89
89
  optimum/rbln/transformers/models/bart/bart_architecture.py,sha256=mAepjL0paPMK180vGTTCxXQ-hVZ1DD6JR-GvVNGJLqY,6268
90
90
  optimum/rbln/transformers/models/bart/configuration_bart.py,sha256=PrRA7OwPTegPamd_mmVnwNygRbNG7pZrsrXdKyfZ6Bo,1351
91
91
  optimum/rbln/transformers/models/bart/modeling_bart.py,sha256=H4MmQZbofb9kJq5WKqoFVjmj3HVtgns3t2F3QdSU-QQ,2337
92
92
  optimum/rbln/transformers/models/bert/__init__.py,sha256=86FuGRBLw315_Roa9D5OUx6Ku2PM0DqSPZ-YSqbF-io,806
93
+ optimum/rbln/transformers/models/bert/bert_architecture.py,sha256=cZgf-B-FV8qbeJdz2Oa-cHu7crrpwBhr081cEalC-h4,473
93
94
  optimum/rbln/transformers/models/bert/configuration_bert.py,sha256=nEZnX6LXpLKWaoPEd4pWSysw9h-PLb2ld0ibC3dcJ7w,1611
94
- optimum/rbln/transformers/models/bert/modeling_bert.py,sha256=zR0US2laTT0yUkL6yyvrR5STQNJcYqtG98ez4SUYQAY,2040
95
+ optimum/rbln/transformers/models/bert/modeling_bert.py,sha256=7MQZS11k4__oyeni5ek2SzRf-gtD3_hMKl_oOzN7_XQ,2263
95
96
  optimum/rbln/transformers/models/blip_2/__init__.py,sha256=L01gPXcUCa8Vg-bcng20vZvBIN_jlqCzwUSFuq0QOag,855
96
97
  optimum/rbln/transformers/models/blip_2/configuration_blip_2.py,sha256=JUUp4SahBYwv_o2dsHMsgESbPCJHgrng5m7wwtd7HRQ,3193
97
- optimum/rbln/transformers/models/blip_2/modeling_blip_2.py,sha256=WrxH-VljAbYEFnxo5tQYE29s7TvoRX0L0IPB2TKN27I,16158
98
+ optimum/rbln/transformers/models/blip_2/modeling_blip_2.py,sha256=pI0HCYIy6SsBc2umWuzXHM6tdu_9e2I5gntoQRoxuhA,16264
98
99
  optimum/rbln/transformers/models/clip/__init__.py,sha256=TLeXDqcFK6M6v9x7Xr64kBbqGu3hFHM7p754dQ8UVQc,938
99
100
  optimum/rbln/transformers/models/clip/configuration_clip.py,sha256=ishrDbTdJm7_AfOn2MPAdAzCWXMdQldwgx9wR_6GcWU,3808
100
101
  optimum/rbln/transformers/models/clip/modeling_clip.py,sha256=knK7gINAluSHcWvg3zaByb3XRLNmSEGw2NcsOGHnIow,12364
@@ -110,7 +111,7 @@ optimum/rbln/transformers/models/decoderonly/generation_decoderonly.py,sha256=4D
110
111
  optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py,sha256=hu2eJr0CpLHnRPSLhyBhyyC6DfosKmPu7lPjapcBCkE,33061
111
112
  optimum/rbln/transformers/models/depth_anything/__init__.py,sha256=xvPSIriMJWyNeVYoVB1Z7YqB4kkHOIkaHq7loNps-dk,756
112
113
  optimum/rbln/transformers/models/depth_anything/configuration_depth_anything.py,sha256=JujBVEUa_zZDXNPr1y-B_PhK5SgFFcY8Ib4EoGjjtmE,989
113
- optimum/rbln/transformers/models/depth_anything/modeling_depth_anything.py,sha256=ganUtyPKcdKe5QCJ2atQhblzfAstyYkThYDm_DIecU8,1014
114
+ optimum/rbln/transformers/models/depth_anything/modeling_depth_anything.py,sha256=tTmsVaW9Wb2WD3nKRLwp7swn3hbMvgwUEJwwVIfNYEc,1008
114
115
  optimum/rbln/transformers/models/distilbert/__init__.py,sha256=zXL78SOEORTnUN_wrdoaDaYpntG8lcFHvPobM6jC0CI,841
115
116
  optimum/rbln/transformers/models/distilbert/configuration_distilbert.py,sha256=O3BW9JjyYk9PLyiofvOKEgTdMZ_jpIuPfot281pSsyg,984
116
117
  optimum/rbln/transformers/models/distilbert/modeling_distilbert.py,sha256=LUh6zYGa8AR3Yxaj3gtyJRc-czBN3qnHTc-JTAhuqY0,1099
@@ -129,11 +130,15 @@ optimum/rbln/transformers/models/gemma3/__init__.py,sha256=6rugk3615SEt4lh7gduo_
129
130
  optimum/rbln/transformers/models/gemma3/configuration_gemma3.py,sha256=rKjKJhyaIM7YoiLR-q8GAZKIQNzDzcb5X7qf_FJE72M,3398
130
131
  optimum/rbln/transformers/models/gemma3/gemma3_architecture.py,sha256=fpLDAXCe5paWVsfc0tL59JkRQMRF-WNgIzOIb_QpSLU,6191
131
132
  optimum/rbln/transformers/models/gemma3/gemma3_runtime_utils.py,sha256=vYQ9sjRlkfamxZca_hVMQI0ylKeExsV02gOWaYVMjyg,9640
132
- optimum/rbln/transformers/models/gemma3/modeling_gemma3.py,sha256=2Fg1eyCDQ8mwGWdcB_jaq_i_iSEzXr3UWjWXe1Z_Ie8,24079
133
+ optimum/rbln/transformers/models/gemma3/modeling_gemma3.py,sha256=zraPjowA8ni9Lb0NrmsiUai2XdOjgYOOpVnIU1n2jGA,24208
133
134
  optimum/rbln/transformers/models/gpt2/__init__.py,sha256=SsawHMStE3wYRtqkH5EvdTFkCdX0LLmp-QSKFhEBrHo,740
134
135
  optimum/rbln/transformers/models/gpt2/configuration_gpt2.py,sha256=iGdHfzG7plekZcIz-Z5U8lRE4SB8gbJJNcFQJ9l8Myg,1533
135
136
  optimum/rbln/transformers/models/gpt2/gpt2_architecture.py,sha256=MyAWReXmyuHnDpW5HI_TI7psyJZxLujZ9KT5XnNm7nA,2802
136
137
  optimum/rbln/transformers/models/gpt2/modeling_gpt2.py,sha256=DhF6hU3oCYGbZ7UijKCsRfTx-VCkTqqqNwqqMSrjqRE,2230
138
+ optimum/rbln/transformers/models/grounding_dino/__init__.py,sha256=DE7DipZGvrKC6b1T77k4I4X3G70ss8mlr-PrZCaohto,307
139
+ optimum/rbln/transformers/models/grounding_dino/configuration_grounding_dino.py,sha256=b6aeAlAMf0aOoTKAqe5nnBfontu_H3zvIHgOiCNMJ1I,3127
140
+ optimum/rbln/transformers/models/grounding_dino/grounding_dino_architecture.py,sha256=A_YBgvPVHwwKgsGLL0z4MyTKb6Hb6r3y6sU3oVIrKiU,22779
141
+ optimum/rbln/transformers/models/grounding_dino/modeling_grounding_dino.py,sha256=bXAOs2QH4sy2UFoFLUSM6u1_VHouUT5COERLQX20F6Y,46897
137
142
  optimum/rbln/transformers/models/idefics3/__init__.py,sha256=ulxE7HEfXsNJhd25J9Fvi6vggo9aZH9sLKJjWB6LlzQ,814
138
143
  optimum/rbln/transformers/models/idefics3/configuration_idefics3.py,sha256=8BhPLkfE1_ZU0eSm2iTbWQOnVe1q0g99srYHWZM6VJ4,2373
139
144
  optimum/rbln/transformers/models/idefics3/modeling_idefics3.py,sha256=UqKUVZ6pZjP2VMfBa3-dJkLNPDqr3H1wHiOo9LPucjs,19636
@@ -196,6 +201,9 @@ optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py,sha256=4MupGjhe
196
201
  optimum/rbln/transformers/models/siglip/__init__.py,sha256=X1Fc1GUnJ2EIxFx45nbeoW-T2t0OyP3W73C0HD8Vowo,712
197
202
  optimum/rbln/transformers/models/siglip/configuration_siglip.py,sha256=m1h8iDx_X9VmHdJi0sc1a2KsAO3OnpMb4cd9jW2Ic-U,3031
198
203
  optimum/rbln/transformers/models/siglip/modeling_siglip.py,sha256=1TyRaxmhp6mg6UfhQTbZhW26013TE3nVnroYG7EROcU,8033
204
+ optimum/rbln/transformers/models/swin/__init__.py,sha256=gUsLDB8ceNxt53Cf69OT32JuZoRdmmIsRfjRdHTLDd0,698
205
+ optimum/rbln/transformers/models/swin/configuration_swin.py,sha256=iVtpT2jXY5LNkUbbr5J08z97unc43KEhArIZ1tBRzEU,1692
206
+ optimum/rbln/transformers/models/swin/modeling_swin.py,sha256=npQgTCEkonG41HzHzEk-a13NFLJHA-K82HFW2VyR0xc,13968
199
207
  optimum/rbln/transformers/models/t5/__init__.py,sha256=R1Q8Z1vaIdx4rDjeCmm_ZMSgewWaqaI0l93AHwewtew,818
200
208
  optimum/rbln/transformers/models/t5/configuration_t5.py,sha256=nqDbibqykeeWn1TlKk6LmCn-DawTVudMMuBn2c2jds8,1362
201
209
  optimum/rbln/transformers/models/t5/modeling_t5.py,sha256=pdAWBLVknTzbma0Ij-VQ2Qve-frPjxL-AwMyU-zouPY,5123
@@ -229,8 +237,8 @@ optimum/rbln/utils/logging.py,sha256=VKKBmlQSdg6iZCGmAXaWYiW67K84jyp1QJhLQSSjPPE
229
237
  optimum/rbln/utils/model_utils.py,sha256=4k5879Kh75m3x_vS4-qOGfqsOiAvc2kdNFFfvsFvz3k,1748
230
238
  optimum/rbln/utils/runtime_utils.py,sha256=R6uXDbeJP03-FWdd4vthNe2D4aCra5n12E3WB1ifiGM,7933
231
239
  optimum/rbln/utils/save_utils.py,sha256=hG5uOtYmecSXZuGTvCXsTM-SiyZpr5q3InUGCCq_jzQ,3619
232
- optimum/rbln/utils/submodule.py,sha256=w5mgPgncI740gVKMu3S-69DGNdUSI0bTZxegQGcZ98Y,5011
233
- optimum_rbln-0.8.3a3.dist-info/METADATA,sha256=NmmTKyVMLYeulq_6EaNrsurXob_0dJ59oBbz1Eeh2VA,5299
234
- optimum_rbln-0.8.3a3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
235
- optimum_rbln-0.8.3a3.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
236
- optimum_rbln-0.8.3a3.dist-info/RECORD,,
240
+ optimum/rbln/utils/submodule.py,sha256=60NGLFvnhjP1DJg1opdb-FVQDsthcLCwWjW_1WQaasU,5280
241
+ optimum_rbln-0.8.3rc0.dist-info/METADATA,sha256=ls15qV7a7bVTpkphb6aHteuBfil7u1xOzkUuysoRPZg,5300
242
+ optimum_rbln-0.8.3rc0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
243
+ optimum_rbln-0.8.3rc0.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
244
+ optimum_rbln-0.8.3rc0.dist-info/RECORD,,