optimum-rbln 0.2.1a5__py3-none-any.whl → 0.7.2rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. optimum/rbln/__init__.py +14 -2
  2. optimum/rbln/__version__.py +2 -2
  3. optimum/rbln/diffusers/__init__.py +10 -0
  4. optimum/rbln/diffusers/modeling_diffusers.py +115 -23
  5. optimum/rbln/diffusers/models/__init__.py +7 -1
  6. optimum/rbln/diffusers/models/autoencoders/__init__.py +1 -0
  7. optimum/rbln/diffusers/models/autoencoders/vae.py +52 -2
  8. optimum/rbln/diffusers/models/autoencoders/vq_model.py +159 -0
  9. optimum/rbln/diffusers/models/transformers/__init__.py +1 -0
  10. optimum/rbln/diffusers/models/transformers/prior_transformer.py +174 -0
  11. optimum/rbln/diffusers/models/unets/unet_2d_condition.py +57 -14
  12. optimum/rbln/diffusers/pipelines/__init__.py +10 -0
  13. optimum/rbln/diffusers/pipelines/kandinsky2_2/__init__.py +17 -0
  14. optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +83 -0
  15. optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py +22 -0
  16. optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +22 -0
  17. optimum/rbln/transformers/__init__.py +2 -0
  18. optimum/rbln/transformers/models/__init__.py +12 -2
  19. optimum/rbln/transformers/models/clip/__init__.py +6 -1
  20. optimum/rbln/transformers/models/clip/modeling_clip.py +26 -1
  21. {optimum_rbln-0.2.1a5.dist-info → optimum_rbln-0.7.2rc0.dist-info}/METADATA +1 -1
  22. {optimum_rbln-0.2.1a5.dist-info → optimum_rbln-0.7.2rc0.dist-info}/RECORD +24 -18
  23. {optimum_rbln-0.2.1a5.dist-info → optimum_rbln-0.7.2rc0.dist-info}/WHEEL +0 -0
  24. {optimum_rbln-0.2.1a5.dist-info → optimum_rbln-0.7.2rc0.dist-info}/licenses/LICENSE +0 -0
optimum/rbln/__init__.py CHANGED
@@ -48,6 +48,7 @@ _import_structure = {
         "RBLNCLIPTextModel",
         "RBLNCLIPTextModelWithProjection",
         "RBLNCLIPVisionModel",
+        "RBLNCLIPVisionModelWithProjection",
         "RBLNDPTForDepthEstimation",
         "RBLNExaoneForCausalLM",
         "RBLNGemmaForCausalLM",
@@ -74,11 +75,15 @@ _import_structure = {
         "RBLNBertForMaskedLM",
     ],
     "diffusers": [
+        "RBLNAutoencoderKL",
+        "RBLNControlNetModel",
+        "RBLNPriorTransformer",
+        "RBLNKandinskyV22InpaintCombinedPipeline",
+        "RBLNKandinskyV22InpaintPipeline",
+        "RBLNKandinskyV22PriorPipeline",
         "RBLNStableDiffusionPipeline",
         "RBLNStableDiffusionXLPipeline",
-        "RBLNAutoencoderKL",
         "RBLNUNet2DConditionModel",
-        "RBLNControlNetModel",
         "RBLNStableDiffusionImg2ImgPipeline",
         "RBLNStableDiffusionInpaintPipeline",
         "RBLNStableDiffusionControlNetImg2ImgPipeline",
@@ -88,6 +93,7 @@ _import_structure = {
         "RBLNStableDiffusionControlNetPipeline",
         "RBLNStableDiffusionXLControlNetPipeline",
         "RBLNStableDiffusionXLControlNetImg2ImgPipeline",
+        "RBLNVQModel",
         "RBLNSD3Transformer2DModel",
         "RBLNStableDiffusion3Img2ImgPipeline",
         "RBLNStableDiffusion3InpaintPipeline",
@@ -101,7 +107,11 @@ if TYPE_CHECKING:
         RBLNAutoencoderKL,
         RBLNControlNetModel,
         RBLNDiffusionMixin,
+        RBLNKandinskyV22InpaintCombinedPipeline,
+        RBLNKandinskyV22InpaintPipeline,
+        RBLNKandinskyV22PriorPipeline,
         RBLNMultiControlNetModel,
+        RBLNPriorTransformer,
         RBLNSD3Transformer2DModel,
         RBLNStableDiffusion3Img2ImgPipeline,
         RBLNStableDiffusion3InpaintPipeline,
@@ -117,6 +127,7 @@ if TYPE_CHECKING:
         RBLNStableDiffusionXLInpaintPipeline,
         RBLNStableDiffusionXLPipeline,
         RBLNUNet2DConditionModel,
+        RBLNVQModel,
     )
     from .modeling import (
         RBLNBaseModel,
@@ -148,6 +159,7 @@ if TYPE_CHECKING:
         RBLNCLIPTextModel,
         RBLNCLIPTextModelWithProjection,
         RBLNCLIPVisionModel,
+        RBLNCLIPVisionModelWithProjection,
         RBLNDistilBertForQuestionAnswering,
         RBLNDPTForDepthEstimation,
         RBLNExaoneForCausalLM,
optimum/rbln/__version__.py CHANGED
@@ -12,5 +12,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.2.1a5'
-__version_tuple__ = version_tuple = (0, 2, 1)
+__version__ = version = '0.7.2rc0'
+__version_tuple__ = version_tuple = (0, 7, 2)
optimum/rbln/diffusers/__init__.py CHANGED
@@ -24,6 +24,9 @@ ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES["optimum.rbln"])
 
 _import_structure = {
     "pipelines": [
+        "RBLNKandinskyV22InpaintCombinedPipeline",
+        "RBLNKandinskyV22InpaintPipeline",
+        "RBLNKandinskyV22PriorPipeline",
         "RBLNStableDiffusionPipeline",
         "RBLNStableDiffusionXLPipeline",
         "RBLNStableDiffusionImg2ImgPipeline",
@@ -44,6 +47,8 @@ _import_structure = {
         "RBLNUNet2DConditionModel",
         "RBLNControlNetModel",
         "RBLNSD3Transformer2DModel",
+        "RBLNPriorTransformer",
+        "RBLNVQModel",
     ],
     "modeling_diffusers": [
         "RBLNDiffusionMixin",
@@ -55,10 +60,15 @@ if TYPE_CHECKING:
     from .models import (
         RBLNAutoencoderKL,
         RBLNControlNetModel,
+        RBLNPriorTransformer,
         RBLNSD3Transformer2DModel,
         RBLNUNet2DConditionModel,
+        RBLNVQModel,
     )
     from .pipelines import (
+        RBLNKandinskyV22InpaintCombinedPipeline,
+        RBLNKandinskyV22InpaintPipeline,
+        RBLNKandinskyV22PriorPipeline,
         RBLNMultiControlNetModel,
         RBLNStableDiffusion3Img2ImgPipeline,
         RBLNStableDiffusion3InpaintPipeline,
optimum/rbln/diffusers/modeling_diffusers.py CHANGED
@@ -23,6 +23,7 @@ from ..modeling import RBLNModel
 from ..modeling_config import RUNTIME_KEYWORDS, ContextRblnConfig, use_rbln_config
 from ..utils.decorator_utils import remove_compile_time_kwargs
 from ..utils.logging import get_logger
+from . import pipelines
 
 
 logger = get_logger(__name__)
@@ -67,6 +68,7 @@ class RBLNDiffusionMixin:
     """
 
     _submodules = []
+    _prefix = {}
 
     @classmethod
     @property
@@ -84,25 +86,50 @@ class RBLNDiffusionMixin:
     ) -> Dict[str, Any]:
         submodule = getattr(model, submodule_name)
         submodule_class_name = submodule.__class__.__name__
+        if isinstance(submodule, torch.nn.Module):
+            if submodule_class_name == "MultiControlNetModel":
+                submodule_class_name = "ControlNetModel"
 
-        if submodule_class_name == "MultiControlNetModel":
-            submodule_class_name = "ControlNetModel"
+            submodule_cls: RBLNModel = getattr(importlib.import_module("optimum.rbln"), f"RBLN{submodule_class_name}")
 
-        submodule_cls: RBLNModel = getattr(importlib.import_module("optimum.rbln"), f"RBLN{submodule_class_name}")
+            submodule_config = rbln_config.get(submodule_name, {})
+            submodule_config = copy.deepcopy(submodule_config)
 
-        submodule_config = rbln_config.get(submodule_name, {})
-        submodule_config = copy.deepcopy(submodule_config)
+            pipe_global_config = {k: v for k, v in rbln_config.items() if k not in cls._submodules}
 
-        pipe_global_config = {k: v for k, v in rbln_config.items() if k not in cls._submodules}
+            submodule_config.update({k: v for k, v in pipe_global_config.items() if k not in submodule_config})
+            submodule_config.update(
+                {
+                    "img2img_pipeline": cls.img2img_pipeline,
+                    "inpaint_pipeline": cls.inpaint_pipeline,
+                }
+            )
+            submodule_config = submodule_cls.update_rbln_config_using_pipe(model, submodule_config)
+        elif hasattr(pipelines, submodule_class_name):
+            submodule_config = rbln_config.get(submodule_name, {})
+            submodule_config = copy.deepcopy(submodule_config)
+
+            submodule_cls: RBLNModel = getattr(importlib.import_module("optimum.rbln"), f"{submodule_class_name}")
+            prefix = cls._prefix.get(submodule_name, "")
+            connected_submodules = cls._connected_classes.get(submodule_name)._submodules
+            for connected_submodule_name in connected_submodules:
+                connected_submodule_config = rbln_config.pop(prefix + connected_submodule_name, {})
+                if connected_submodule_name in submodule_config:
+                    submodule_config[connected_submodule_name].update(connected_submodule_config)
+                else:
+                    submodule_config[connected_submodule_name] = connected_submodule_config
 
-        submodule_config.update({k: v for k, v in pipe_global_config.items() if k not in submodule_config})
-        submodule_config.update(
-            {
-                "img2img_pipeline": cls.img2img_pipeline,
-                "inpaint_pipeline": cls.inpaint_pipeline,
-            }
-        )
-        submodule_config = submodule_cls.update_rbln_config_using_pipe(model, submodule_config)
+            submodules = copy.deepcopy(cls._submodules)
+            submodules += [prefix + connected_submodule_name for connected_submodule_name in connected_submodules]
+
+            pipe_global_config = {k: v for k, v in rbln_config.items() if k not in submodules}
+            for connected_submodule_name in connected_submodules:
+                submodule_config[connected_submodule_name].update(
+                    {k: v for k, v in pipe_global_config.items() if k not in submodule_config}
+                )
+            rbln_config[submodule_name] = submodule_config
+        else:
+            raise ValueError(f"submodule {submodule_name} isn't supported")
         return submodule_config
 
     @staticmethod
@@ -165,8 +192,26 @@ class RBLNDiffusionMixin:
 
         else:
             # raise error if any of submodules are torch module.
-            model_index_config = None
-            for submodule_name in cls._submodules:
+            model_index_config = cls.load_config(pretrained_model_name_or_path=model_id)
+            if cls._load_connected_pipes:
+                submodules = []
+                for submodule in cls._submodules:
+                    submodule_config = rbln_config.pop(submodule, {})
+                    prefix = cls._prefix.get(submodule, "")
+                    connected_submodules = cls._connected_classes.get(submodule)._submodules
+                    for connected_submodule_name in connected_submodules:
+                        connected_submodule_config = submodule_config.pop(connected_submodule_name, {})
+                        if connected_submodule_config:
+                            rbln_config[prefix + connected_submodule_name] = connected_submodule_config
+                        submodules.append(prefix + connected_submodule_name)
+                pipe_global_config = {k: v for k, v in rbln_config.items() if k not in submodules}
+                for submodule in submodules:
+                    if submodule in rbln_config:
+                        rbln_config[submodule].update(pipe_global_config)
+            else:
+                submodules = cls._submodules
+
+            for submodule_name in submodules:
                 if isinstance(kwargs.get(submodule_name), torch.nn.Module):
                     raise AssertionError(
                         f"{submodule_name} is not compiled torch module. If you want to compile, set `export=True`."
@@ -181,9 +226,6 @@ class RBLNDiffusionMixin:
                 if not any(kwd in submodule_config for kwd in RUNTIME_KEYWORDS):
                     continue
 
-                if model_index_config is None:
-                    model_index_config = cls.load_config(pretrained_model_name_or_path=model_id)
-
                 module_name, class_name = model_index_config[submodule_name]
                 if module_name != "optimum.rbln":
                     raise ValueError(
@@ -228,6 +270,7 @@ class RBLNDiffusionMixin:
         passed_submodules: Dict[str, RBLNModel],
         model_save_dir: Optional[PathLike],
         rbln_config: Dict[str, Any],
+        prefix: Optional[str] = "",
    ) -> Dict[str, RBLNModel]:
         compiled_submodules = {}
 
@@ -245,17 +288,54 @@ class RBLNDiffusionMixin:
                     controlnets=submodule,
                     model_save_dir=model_save_dir,
                     controlnet_rbln_config=submodule_rbln_config,
+                    prefix=prefix,
                 )
             elif isinstance(submodule, torch.nn.Module):
                 submodule_cls: RBLNModel = getattr(
                     importlib.import_module("optimum.rbln"), f"RBLN{submodule.__class__.__name__}"
                 )
+                subfolder = prefix + submodule_name
                 submodule = submodule_cls.from_model(
                     model=submodule,
-                    subfolder=submodule_name,
+                    subfolder=subfolder,
                     model_save_dir=model_save_dir,
                     rbln_config=submodule_rbln_config,
                 )
+            elif hasattr(pipelines, submodule.__class__.__name__):
+                connected_pipe = submodule
+                connected_pipe_model_save_dir = model_save_dir
+                connected_pipe_rbln_config = submodule_rbln_config
+                connected_pipe_cls: RBLNDiffusionMixin = getattr(
+                    importlib.import_module("optimum.rbln"), connected_pipe.__class__.__name__
+                )
+                submodule_dict = {}
+                for name in connected_pipe.config.keys():
+                    if hasattr(connected_pipe, name):
+                        submodule_dict[name] = getattr(connected_pipe, name)
+                connected_pipe = connected_pipe_cls(**submodule_dict)
+                connected_pipe_submodules = {}
+                prefix = cls._prefix.get(submodule_name, "")
+                for name in connected_pipe_cls._submodules:
+                    if prefix + name in passed_submodules:
+                        connected_pipe_submodules[name] = passed_submodules.get(prefix + name)
+
+                connected_pipe_compiled_submodules = connected_pipe_cls._compile_submodules(
+                    model=connected_pipe,
+                    passed_submodules=connected_pipe_submodules,
+                    model_save_dir=model_save_dir,
+                    rbln_config=connected_pipe_rbln_config,
+                    prefix=prefix,
+                )
+                connected_pipe = connected_pipe_cls._construct_pipe(
+                    connected_pipe,
+                    connected_pipe_compiled_submodules,
+                    connected_pipe_model_save_dir,
+                    connected_pipe_rbln_config,
+                )
+
+                for name in connected_pipe_cls._submodules:
+                    compiled_submodules[prefix + name] = getattr(connected_pipe, name)
+                submodule = connected_pipe
             else:
                 raise ValueError(f"Unknown class of submodule({submodule_name}) : {submodule.__class__.__name__} ")
 
@@ -268,6 +348,7 @@ class RBLNDiffusionMixin:
         controlnets: "MultiControlNetModel",
         model_save_dir: Optional[PathLike],
         controlnet_rbln_config: Dict[str, Any],
+        prefix: Optional[str] = "",
     ):
         # Compile multiple ControlNet models for a MultiControlNet setup
         from .models.controlnet import RBLNControlNetModel
@@ -276,7 +357,7 @@ class RBLNDiffusionMixin:
        compiled_controlnets = [
             RBLNControlNetModel.from_model(
                 model=controlnet,
-                subfolder="controlnet" if i == 0 else f"controlnet_{i}",
+                subfolder=f"{prefix}controlnet" if i == 0 else f"{prefix}controlnet_{i}",
                 model_save_dir=model_save_dir,
                 rbln_config=controlnet_rbln_config,
             )
@@ -287,10 +368,21 @@ class RBLNDiffusionMixin:
     @classmethod
     def _construct_pipe(cls, model, submodules, model_save_dir, rbln_config):
         # Construct finalize pipe setup with compiled submodules and configurations
+        submodule_names = []
+        for submodule_name in cls._submodules:
+            submodule = getattr(model, submodule_name)
+            if hasattr(pipelines, submodule.__class__.__name__):
+                prefix = cls._prefix.get(submodule_name, "")
+                connected_pipe_submodules = submodules[submodule_name].__class__._submodules
+                connected_pipe_submodules = [prefix + name for name in connected_pipe_submodules]
+                submodule_names += connected_pipe_submodules
+                setattr(model, submodule_name, submodules[submodule_name])
+            else:
+                submodule_names.append(submodule_name)
 
         if model_save_dir is not None:
             # To skip saving original pytorch modules
-            for submodule_name in cls._submodules:
+            for submodule_name in submodule_names:
                 delattr(model, submodule_name)
 
             # Direct calling of `save_pretrained` causes config.unet = (None, None).
@@ -300,7 +392,7 @@ class RBLNDiffusionMixin:
             # Causing warning messeages.
 
         update_dict = {}
-        for submodule_name in cls._submodules:
+        for submodule_name in submodule_names:
             # replace submodule
             setattr(model, submodule_name, submodules[submodule_name])
             update_dict[submodule_name] = ("optimum.rbln", submodules[submodule_name].__class__.__name__)
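
Note: the `_prefix` / `_connected_classes` bookkeeping above lets a combined pipeline (prior + decoder) compile each connected sub-pipeline's modules under prefixed subfolders. A minimal sketch of the resulting key layout, assuming a combined pipeline with `_prefix = {"prior_pipe": "prior_"}`; the option names follow the diff above, but the concrete values are illustrative:

```python
# Hypothetical rbln_config for a combined pipeline (values are made-up examples).
rbln_config = {
    "img_height": 512,           # pipeline-global, propagated to every submodule
    "img_width": 512,
    "prior_pipe": {              # options for the connected prior pipeline
        "prior": {"batch_size": 2},
    },
}
# During from_pretrained, the connected submodule's entry is re-keyed with the
# prefix, i.e. rbln_config["prior_prior"] = {"batch_size": 2, ...}, so the
# PriorTransformer is saved to and loaded from the "prior_prior" subfolder.
```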
optimum/rbln/diffusers/models/__init__.py CHANGED
@@ -20,20 +20,26 @@ from transformers.utils import _LazyModule
 _import_structure = {
     "autoencoders": [
         "RBLNAutoencoderKL",
+        "RBLNVQModel",
     ],
     "unets": [
         "RBLNUNet2DConditionModel",
     ],
     "controlnet": ["RBLNControlNetModel"],
-    "transformers": ["RBLNSD3Transformer2DModel"],
+    "transformers": [
+        "RBLNPriorTransformer",
+        "RBLNSD3Transformer2DModel",
+    ],
 }
 
 if TYPE_CHECKING:
     from .autoencoders import (
         RBLNAutoencoderKL,
+        RBLNVQModel,
     )
     from .controlnet import RBLNControlNetModel
     from .transformers import (
+        RBLNPriorTransformer,
         RBLNSD3Transformer2DModel,
     )
     from .unets import (
optimum/rbln/diffusers/models/autoencoders/__init__.py CHANGED
@@ -13,3 +13,4 @@
 # limitations under the License.
 
 from .autoencoder_kl import RBLNAutoencoderKL
+from .vq_model import RBLNVQModel
optimum/rbln/diffusers/models/autoencoders/vae.py CHANGED
@@ -12,11 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, List
 
 import torch  # noqa: I001
-from diffusers import AutoencoderKL
+from diffusers import AutoencoderKL, VQModel
 from diffusers.models.autoencoders.vae import DiagonalGaussianDistribution
+from diffusers.models.autoencoders.vq_model import VQEncoderOutput
 from diffusers.models.modeling_outputs import AutoencoderKLOutput
 
 from ....utils.logging import get_logger
@@ -72,3 +73,52 @@ class _VAEEncoder(torch.nn.Module):
     def forward(self, x):
         vae_out = _VAEEncoder.encode(self.vae, x, return_dict=False)
         return vae_out
+
+
+class RBLNRuntimeVQEncoder(RBLNPytorchRuntime):
+    def encode(self, x: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
+        h = self.forward(x.contiguous())
+        return VQEncoderOutput(latents=h)
+
+
+class RBLNRuntimeVQDecoder(RBLNPytorchRuntime):
+    def decode(self, h: torch.Tensor, force_not_quantize: bool = False, shape=None, **kwargs) -> List[torch.Tensor]:
+        if not (force_not_quantize and not self.lookup_from_codebook):
+            raise ValueError(
+                "Currently, the `decode` method of the class `RBLNVQModel` is executed successfully only if `force_not_quantize` is True and `config.lookup_from_codebook` is False"
+            )
+        commit_loss = torch.zeros((h.shape[0])).to(h.device, dtype=h.dtype)
+        dec = self.forward(h.contiguous())
+        return dec, commit_loss
+
+
+class _VQEncoder(torch.nn.Module):
+    def __init__(self, vq_model: VQModel):
+        super().__init__()
+        self.vq_model = vq_model
+
+    def encode(self, x: torch.Tensor, return_dict: bool = True):
+        h = self.vq_model.encoder(x)
+        h = self.vq_model.quant_conv(h)
+        return h
+
+    def forward(self, x: torch.Tensor):
+        vq_out = self.encode(x)
+        return vq_out
+
+
+class _VQDecoder(torch.nn.Module):
+    def __init__(self, vq_model: VQModel):
+        super().__init__()
+        self.vq_model = vq_model
+
+    def decode(self, h: torch.Tensor, force_not_quantize: bool = False, return_dict: bool = True, shape=None):
+        quant = h
+        quant2 = self.vq_model.post_quant_conv(quant)
+        quant = quant if self.vq_model.config.norm_type == "spatial" else None
+        dec = self.vq_model.decoder(quant2, quant)
+        return dec
+
+    def forward(self, h: torch.Tensor):
+        vq_out = self.decode(h)
+        return vq_out
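
The `_VQEncoder` / `_VQDecoder` wrappers exist so that each half of the VQ model traces to a single static graph with one tensor in and one tensor out. A rough sketch of that idea, using `torch.jit.trace` as a stand-in for the RBLN compiler and assumed example shapes:

```python
import torch
from diffusers import VQModel

# _VQEncoder / _VQDecoder are the private wrappers defined in vae.py above.
from optimum.rbln.diffusers.models.autoencoders.vae import _VQDecoder, _VQEncoder

vq = VQModel()                         # default config, for illustration only
enc = _VQEncoder(vq).eval()            # forward(x): encoder + quant_conv
dec = _VQDecoder(vq).eval()            # forward(h): post_quant_conv + decoder

x = torch.randn(1, 3, 256, 256)        # assumed input resolution
traced_enc = torch.jit.trace(enc, x)   # stand-in for cls.compile(...) below
traced_dec = torch.jit.trace(dec, enc(x))
```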
optimum/rbln/diffusers/models/autoencoders/vq_model.py ADDED
@@ -0,0 +1,159 @@
+# Copyright 2024 Rebellions Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+import rebel
+import torch
+from diffusers import VQModel
+from diffusers.models.autoencoders.vae import DecoderOutput
+from diffusers.models.autoencoders.vq_model import VQEncoderOutput
+from transformers import PretrainedConfig
+
+from ....modeling import RBLNModel
+from ....modeling_config import DEFAULT_COMPILED_MODEL_NAME, RBLNCompileConfig, RBLNConfig
+from ....utils.logging import get_logger
+from ...modeling_diffusers import RBLNDiffusionMixin
+from .vae import RBLNRuntimeVQDecoder, RBLNRuntimeVQEncoder, _VQDecoder, _VQEncoder
+
+
+if TYPE_CHECKING:
+    from transformers import AutoFeatureExtractor, AutoProcessor, AutoTokenizer
+
+logger = get_logger(__name__)
+
+
+class RBLNVQModel(RBLNModel):
+    auto_model_class = VQModel
+    config_name = "config.json"
+    hf_library_name = "diffusers"
+
+    def __post_init__(self, **kwargs):
+        super().__post_init__(**kwargs)
+
+        self.encoder = RBLNRuntimeVQEncoder(runtime=self.model[0], main_input_name="x")
+        self.decoder = RBLNRuntimeVQDecoder(runtime=self.model[1], main_input_name="z")
+        self.decoder.lookup_from_codebook = self.config.lookup_from_codebook
+        height = self.rbln_config.model_cfg.get("img_height", 512)
+        width = self.rbln_config.model_cfg.get("img_width", 512)
+        self.image_size = [height, width]
+
+    @classmethod
+    def get_compiled_model(cls, model, rbln_config: RBLNConfig):
+        encoder_model = _VQEncoder(model)
+        decoder_model = _VQDecoder(model)
+        encoder_model.eval()
+        decoder_model.eval()
+
+        enc_compiled_model = cls.compile(encoder_model, rbln_compile_config=rbln_config.compile_cfgs[0])
+        dec_compiled_model = cls.compile(decoder_model, rbln_compile_config=rbln_config.compile_cfgs[1])
+
+        return {"encoder": enc_compiled_model, "decoder": dec_compiled_model}
+
+    @classmethod
+    def update_rbln_config_using_pipe(cls, pipe: RBLNDiffusionMixin, rbln_config: Dict[str, Any]) -> Dict[str, Any]:
+        batch_size = rbln_config.get("batch_size")
+        if batch_size is None:
+            batch_size = 1
+        img_height = rbln_config.get("img_height")
+        if img_height is None:
+            img_height = 512
+        img_width = rbln_config.get("img_width")
+        if img_width is None:
+            img_width = 512
+
+        rbln_config.update(
+            {
+                "batch_size": batch_size,
+                "img_height": img_height,
+                "img_width": img_width,
+            }
+        )
+
+        return rbln_config
+
+    @classmethod
+    def _get_rbln_config(
+        cls,
+        preprocessors: Union["AutoFeatureExtractor", "AutoProcessor", "AutoTokenizer"],
+        model_config: "PretrainedConfig",
+        rbln_kwargs: Dict[str, Any] = {},
+    ) -> RBLNConfig:
+        batch_size = rbln_kwargs.get("batch_size") or 1
+        height = rbln_kwargs.get("img_height") or 512
+        width = rbln_kwargs.get("img_width") or 512
+
+        if hasattr(model_config, "block_out_channels"):
+            scale_factor = 2 ** (len(model_config.block_out_channels) - 1)
+        else:
+            # image processor default value 8 (int)
+            scale_factor = 8
+
+        enc_shape = (height, width)
+        dec_shape = (height // scale_factor, width // scale_factor)
+
+        enc_input_info = [
+            (
+                "x",
+                [batch_size, model_config.in_channels, enc_shape[0], enc_shape[1]],
+                "float32",
+            )
+        ]
+        dec_input_info = [
+            (
+                "h",
+                [batch_size, model_config.latent_channels, dec_shape[0], dec_shape[1]],
+                "float32",
+            )
+        ]
+
+        enc_rbln_compile_config = RBLNCompileConfig(compiled_model_name="encoder", input_info=enc_input_info)
+        dec_rbln_compile_config = RBLNCompileConfig(compiled_model_name="decoder", input_info=dec_input_info)
+
+        compile_cfgs = [enc_rbln_compile_config, dec_rbln_compile_config]
+        rbln_config = RBLNConfig(
+            rbln_cls=cls.__name__,
+            compile_cfgs=compile_cfgs,
+            rbln_kwargs=rbln_kwargs,
+        )
+        return rbln_config
+
+    @classmethod
+    def _create_runtimes(
+        cls,
+        compiled_models: List[rebel.RBLNCompiledModel],
+        rbln_device_map: Dict[str, int],
+        activate_profiler: Optional[bool] = None,
+    ) -> List[rebel.Runtime]:
+        if len(compiled_models) == 1:
+            device_val = rbln_device_map[DEFAULT_COMPILED_MODEL_NAME]
+            return [
+                compiled_models[0].create_runtime(
+                    tensor_type="pt", device=device_val, activate_profiler=activate_profiler
+                )
+            ]
+
+        device_vals = [rbln_device_map["encoder"], rbln_device_map["decoder"]]
+        return [
+            compiled_model.create_runtime(tensor_type="pt", device=device_val, activate_profiler=activate_profiler)
+            for compiled_model, device_val in zip(compiled_models, device_vals)
+        ]
+
+    def encode(self, x: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
+        posterior = self.encoder.encode(x)
+        return VQEncoderOutput(latents=posterior)
+
+    def decode(self, h: torch.FloatTensor, **kwargs) -> torch.FloatTensor:
+        dec, commit_loss = self.decoder.decode(h, **kwargs)
+        return DecoderOutput(sample=dec, commit_loss=commit_loss)
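
A hedged usage sketch for the new `RBLNVQModel` (the model id, the `rbln_`-prefixed option-passing convention, and the tensor shapes are illustrative assumptions; `export=True` triggers the encoder/decoder compilation shown above):

```python
import torch
from optimum.rbln import RBLNVQModel

movq = RBLNVQModel.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder-inpaint",  # example id (assumed)
    subfolder="movq",
    export=True,            # trace + compile _VQEncoder / _VQDecoder
    rbln_img_height=512,    # assumed rbln_* kwarg convention
    rbln_img_width=512,
)

enc = movq.encode(torch.randn(1, 3, 512, 512))   # VQEncoderOutput
dec = movq.decode(
    torch.randn(1, 4, 64, 64),                   # assumed latent shape (512 / 8)
    force_not_quantize=True,                     # required by RBLNRuntimeVQDecoder
)
image = dec.sample
```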
optimum/rbln/diffusers/models/transformers/__init__.py CHANGED
@@ -12,4 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from .prior_transformer import RBLNPriorTransformer
 from .transformer_sd3 import RBLNSD3Transformer2DModel
optimum/rbln/diffusers/models/transformers/prior_transformer.py ADDED
@@ -0,0 +1,174 @@
+# Copyright 2024 Rebellions Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+from typing import Any, Dict, Optional, Union
+
+import torch
+from diffusers.models.transformers.prior_transformer import PriorTransformer, PriorTransformerOutput
+from transformers import PretrainedConfig, PreTrainedModel
+
+from ....modeling import RBLNModel
+from ....modeling_config import RBLNCompileConfig, RBLNConfig
+from ....utils.logging import get_logger
+from ....utils.runtime_utils import RBLNPytorchRuntime
+from ...modeling_diffusers import RBLNDiffusionMixin
+
+
+logger = get_logger(__name__)
+
+
+class RBLNRuntimePriorTransformer(RBLNPytorchRuntime):
+    def forward(
+        self, hidden_states, timestep, proj_embedding, encoder_hidden_states, attention_mask, return_dict: bool = True
+    ):
+        predicted_image_embedding = super().forward(
+            hidden_states,
+            timestep,
+            proj_embedding,
+            encoder_hidden_states,
+            attention_mask,
+        )
+        if return_dict:
+            return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
+        else:
+            return (predicted_image_embedding,)
+
+
+class _PriorTransformer(torch.nn.Module):
+    def __init__(self, prior: PriorTransformer):
+        super().__init__()
+        self._prior = prior
+
+    def forward(
+        self,
+        hidden_states,
+        timestep,
+        proj_embedding,
+        encoder_hidden_states,
+        attention_mask,
+        return_dict=True,
+    ):
+        return self._prior.forward(
+            hidden_states,
+            timestep,
+            proj_embedding,
+            encoder_hidden_states,
+            attention_mask,
+            return_dict=False,
+        )
+
+
+class RBLNPriorTransformer(RBLNModel):
+    hf_library_name = "diffusers"
+    auto_model_class = PriorTransformer
+
+    def __post_init__(self, **kwargs):
+        super().__post_init__(**kwargs)
+        self.runtime = RBLNRuntimePriorTransformer(runtime=self.model[0])
+        artifacts = torch.load(self.model_save_dir / self.subfolder / "torch_artifacts.pth", weights_only=False)
+        self.clip_mean = artifacts["clip_mean"]
+        self.clip_std = artifacts["clip_std"]
+
+    @classmethod
+    def wrap_model_if_needed(cls, model: torch.nn.Module, rbln_config: RBLNConfig) -> torch.nn.Module:
+        return _PriorTransformer(model).eval()
+
+    @classmethod
+    def update_rbln_config_using_pipe(cls, pipe: RBLNDiffusionMixin, rbln_config: Dict[str, Any]) -> Dict[str, Any]:
+        batch_size = rbln_config.get("batch_size")
+        if not batch_size:
+            do_classifier_free_guidance = rbln_config.get("guidance_scale", 5.0) > 1.0
+            batch_size = 2 if do_classifier_free_guidance else 1
+        else:
+            if rbln_config.get("guidance_scale"):
+                logger.warning(
+                    "guidance_scale is ignored because batch size is explicitly specified. "
+                    "To ensure consistent behavior, consider removing the guidance scale or "
+                    "adjusting the batch size configuration as needed."
+                )
+        embedding_dim = rbln_config.get("embedding_dim", pipe.prior.config.embedding_dim)
+        num_embeddings = rbln_config.get("num_embeddings", pipe.prior.config.num_embeddings)
+
+        rbln_config.update(
+            {
+                "batch_size": batch_size,
+                "embedding_dim": embedding_dim,
+                "num_embeddings": num_embeddings,
+            }
+        )
+
+        return rbln_config
+
+    @classmethod
+    def save_torch_artifacts(
+        cls,
+        model: "PreTrainedModel",
+        save_dir_path: Path,
+        subfolder: str,
+        rbln_config: RBLNConfig,
+    ):
+        save_dict = {}
+        save_dict["clip_mean"] = model.clip_mean
+        save_dict["clip_std"] = model.clip_std
+        torch.save(save_dict, save_dir_path / subfolder / "torch_artifacts.pth")
+
+    @classmethod
+    def _get_rbln_config(
+        cls,
+        preprocessors,
+        model_config: PretrainedConfig,
+        rbln_kwargs,
+    ) -> RBLNConfig:
+        batch_size = rbln_kwargs.get("batch_size") or 1
+        embedding_dim = rbln_kwargs.get("embedding_dim") or model_config.embedding_dim
+        num_embeddings = rbln_kwargs.get("num_embeddings") or model_config.num_embeddings
+
+        input_info = [
+            ("hidden_states", [batch_size, embedding_dim], "float32"),
+            ("timestep", [], "float32"),
+            ("proj_embedding", [batch_size, embedding_dim], "float32"),
+            ("encoder_hidden_states", [batch_size, num_embeddings, embedding_dim], "float32"),
+            ("attention_mask", [batch_size, num_embeddings], "float32"),
+        ]
+
+        rbln_compile_config = RBLNCompileConfig(input_info=input_info)
+        rbln_config = RBLNConfig(
+            rbln_cls=cls.__name__,
+            compile_cfgs=[rbln_compile_config],
+            rbln_kwargs=rbln_kwargs,
+        )
+        return rbln_config
+
+    def forward(
+        self,
+        hidden_states,
+        timestep: Union[torch.Tensor, float, int],
+        proj_embedding: torch.Tensor,
+        encoder_hidden_states: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.BoolTensor] = None,
+        return_dict: bool = True,
+    ):
+        return self.runtime.forward(
+            hidden_states.contiguous(),
+            timestep.float(),
+            proj_embedding,
+            encoder_hidden_states,
+            attention_mask.float(),
+            return_dict,
+        )
+
+    def post_process_latents(self, prior_latents):
+        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
+        return prior_latents
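
The `input_info` above fixes the prior's five inputs to static shapes. A shape sketch with example values (`embedding_dim`/`num_embeddings` come from the prior's config; 1280 and 77 are assumptions for illustration):

```python
import torch

batch_size, embedding_dim, num_embeddings = 2, 1280, 77  # example values
hidden_states = torch.randn(batch_size, embedding_dim)
timestep = torch.tensor(999.0)                            # scalar, shape []
proj_embedding = torch.randn(batch_size, embedding_dim)
encoder_hidden_states = torch.randn(batch_size, num_embeddings, embedding_dim)
attention_mask = torch.ones(batch_size, num_embeddings, dtype=torch.bool)

# With a compiled RBLNPriorTransformer `prior`, the call would be:
# out = prior(hidden_states, timestep, proj_embedding,
#             encoder_hidden_states, attention_mask)
# out.predicted_image_embedding -> shape (batch_size, embedding_dim)
```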
optimum/rbln/diffusers/models/unets/unet_2d_condition.py CHANGED
@@ -115,6 +115,29 @@ class _UNet_SDXL(torch.nn.Module):
         return unet_out
 
 
+class _UNet_Kandinsky(torch.nn.Module):
+    def __init__(self, unet: "UNet2DConditionModel"):
+        super().__init__()
+        self.unet = unet
+
+    def forward(
+        self,
+        sample: torch.Tensor,
+        timestep: Union[torch.Tensor, float, int],
+        image_embeds: torch.Tensor,
+    ) -> torch.Tensor:
+        added_cond_kwargs = {"image_embeds": image_embeds}
+
+        unet_out = self.unet(
+            sample=sample,
+            timestep=timestep,
+            encoder_hidden_states=None,
+            added_cond_kwargs=added_cond_kwargs,
+            return_dict=False,
+        )
+        return unet_out
+
+
 class RBLNUNet2DConditionModel(RBLNModel):
     hf_library_name = "diffusers"
     auto_model_class = UNet2DConditionModel
@@ -138,6 +161,8 @@ class RBLNUNet2DConditionModel(RBLNModel):
     def wrap_model_if_needed(cls, model: torch.nn.Module, rbln_config: RBLNConfig) -> torch.nn.Module:
         if model.config.addition_embed_type == "text_time":
             return _UNet_SDXL(model).eval()
+        elif model.config.addition_embed_type == "image":
+            return _UNet_Kandinsky(model).eval()
         else:
             return _UNet_SD(model).eval()
 
@@ -146,6 +171,7 @@ class RBLNUNet2DConditionModel(RBLNModel):
         cls, pipe: RBLNDiffusionMixin, rbln_config: Dict[str, Any]
     ) -> Union[int, Tuple[int, int]]:
         image_size = (rbln_config.get("img_height"), rbln_config.get("img_width"))
+        scale_factor = pipe.movq_scale_factor if hasattr(pipe, "movq_scale_factor") else pipe.vae_scale_factor
         if (image_size[0] is None) != (image_size[1] is None):
             raise ValueError("Both image height and image width must be given or not given")
         elif image_size[0] is None and image_size[1] is None:
@@ -153,22 +179,23 @@ class RBLNUNet2DConditionModel(RBLNModel):
                # In case of img2img, sample size of unet is determined by vae encoder.
                vae_sample_size = pipe.vae.config.sample_size
                if isinstance(vae_sample_size, int):
-                    sample_size = vae_sample_size // pipe.vae_scale_factor
+                    sample_size = vae_sample_size // scale_factor
                else:
                    sample_size = (
-                        vae_sample_size[0] // pipe.vae_scale_factor,
-                        vae_sample_size[1] // pipe.vae_scale_factor,
+                        vae_sample_size[0] // scale_factor,
+                        vae_sample_size[1] // scale_factor,
                    )
            else:
                sample_size = pipe.unet.config.sample_size
        else:
-            sample_size = (image_size[0] // pipe.vae_scale_factor, image_size[1] // pipe.vae_scale_factor)
+            sample_size = (image_size[0] // scale_factor, image_size[1] // scale_factor)
 
        return sample_size
 
    @classmethod
    def update_rbln_config_using_pipe(cls, pipe: RBLNDiffusionMixin, rbln_config: Dict[str, Any]) -> Dict[str, Any]:
        text_model_hidden_size = pipe.text_encoder_2.config.hidden_size if hasattr(pipe, "text_encoder_2") else None
+        image_model_hidden_size = pipe.unet.config.encoder_hid_dim if hasattr(pipe, "unet") else None
 
        batch_size = rbln_config.get("batch_size")
        if not batch_size:
@@ -184,10 +211,12 @@ class RBLNUNet2DConditionModel(RBLNModel):
                 "adjusting the batch size configuration as needed."
             )
 
+        max_seq_len = pipe.text_encoder.config.max_position_embeddings if hasattr(pipe, "text_encoder") else None
         rbln_config.update(
             {
-                "max_seq_len": pipe.text_encoder.config.max_position_embeddings,
+                "max_seq_len": max_seq_len,
                 "text_model_hidden_size": text_model_hidden_size,
+                "image_model_hidden_size": image_model_hidden_size,
                 "sample_size": cls.get_unet_sample_size(pipe, rbln_config),
                 "batch_size": batch_size,
                 "is_controlnet": "controlnet" in pipe.config.keys(),
@@ -218,15 +247,16 @@ class RBLNUNet2DConditionModel(RBLNModel):
         if isinstance(sample_size, int):
             sample_size = (sample_size, sample_size)
 
-        if max_seq_len is None:
-            raise ValueError("`rbln_max_seq_len` (ex. text_encoder's max_position_embeddings) must be specified.")
-
         input_info = [
             ("sample", [batch_size, model_config.in_channels, sample_size[0], sample_size[1]], "float32"),
             ("timestep", [], "float32"),
-            ("encoder_hidden_states", [batch_size, max_seq_len, model_config.cross_attention_dim], "float32"),
         ]
 
+        if max_seq_len is not None:
+            input_info.append(
+                ("encoder_hidden_states", [batch_size, max_seq_len, model_config.cross_attention_dim], "float32"),
+            )
+
         if is_controlnet:
             # down block addtional residuals
             first_shape = [batch_size, model_config.block_out_channels[0], sample_size[0], sample_size[1]]
@@ -256,11 +286,15 @@ class RBLNUNet2DConditionModel(RBLNModel):
            ]
            input_info.append(("mid_block_additional_residual", shape, "float32"))
 
-        if hasattr(model_config, "addition_embed_type") and model_config.addition_embed_type == "text_time":
-            rbln_text_model_hidden_size = rbln_kwargs["text_model_hidden_size"]
-            rbln_in_features = model_config.projection_class_embeddings_input_dim
-            input_info.append(("text_embeds", [batch_size, rbln_text_model_hidden_size], "float32"))
-            input_info.append(("time_ids", [batch_size, 6], "float32"))
+        if hasattr(model_config, "addition_embed_type"):
+            if model_config.addition_embed_type == "text_time":
+                rbln_text_model_hidden_size = rbln_kwargs["text_model_hidden_size"]
+                rbln_in_features = model_config.projection_class_embeddings_input_dim
+                input_info.append(("text_embeds", [batch_size, rbln_text_model_hidden_size], "float32"))
+                input_info.append(("time_ids", [batch_size, 6], "float32"))
+            elif model_config.addition_embed_type == "image":
+                rbln_image_model_hidden_size = rbln_kwargs["image_model_hidden_size"]
+                input_info.append(("image_embeds", [batch_size, rbln_image_model_hidden_size], "float32"))
 
         rbln_compile_config = RBLNCompileConfig(input_info=input_info)
 
@@ -323,6 +357,15 @@ class RBLNUNet2DConditionModel(RBLNModel):
                 ),
             )
 
+        if "image_embeds" in added_cond_kwargs:
+            return (
+                super().forward(
+                    sample.contiguous(),
+                    timestep.float(),
+                    **added_cond_kwargs,
+                ),
+            )
+
         return (
             super().forward(
                 sample.contiguous(),
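
For `addition_embed_type == "image"` the compiled UNet therefore takes `image_embeds` in place of the text `encoder_hidden_states` (the Kandinsky 2.2 decoder has no text encoder, so `max_seq_len` stays `None`). An illustrative static input layout under assumed sizes:

```python
# Example input_info for a Kandinsky-style UNet compile; the dimensions
# (batch 2, 4 latent channels, 64x64 sample, 1280-dim image embeds) are
# assumptions, not values fixed by the diff.
input_info = [
    ("sample", [2, 4, 64, 64], "float32"),
    ("timestep", [], "float32"),
    # no encoder_hidden_states entry: max_seq_len is None without a text_encoder
    ("image_embeds", [2, 1280], "float32"),  # width = model_config.encoder_hid_dim
]
```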
optimum/rbln/diffusers/pipelines/__init__.py CHANGED
@@ -25,6 +25,11 @@ _import_structure = {
         "RBLNStableDiffusionXLControlNetImg2ImgPipeline",
         "RBLNStableDiffusionXLControlNetPipeline",
     ],
+    "kandinsky2_2": [
+        "RBLNKandinskyV22InpaintCombinedPipeline",
+        "RBLNKandinskyV22InpaintPipeline",
+        "RBLNKandinskyV22PriorPipeline",
+    ],
     "stable_diffusion": [
         "RBLNStableDiffusionImg2ImgPipeline",
         "RBLNStableDiffusionPipeline",
@@ -49,6 +54,11 @@ if TYPE_CHECKING:
         RBLNStableDiffusionXLControlNetImg2ImgPipeline,
         RBLNStableDiffusionXLControlNetPipeline,
     )
+    from .kandinsky2_2 import (
+        RBLNKandinskyV22InpaintCombinedPipeline,
+        RBLNKandinskyV22InpaintPipeline,
+        RBLNKandinskyV22PriorPipeline,
+    )
     from .stable_diffusion import (
         RBLNStableDiffusionImg2ImgPipeline,
         RBLNStableDiffusionInpaintPipeline,
optimum/rbln/diffusers/pipelines/kandinsky2_2/__init__.py ADDED
@@ -0,0 +1,17 @@
+# Copyright 2024 Rebellions Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .pipeline_kandinsky2_2_combined import RBLNKandinskyV22InpaintCombinedPipeline
+from .pipeline_kandinsky2_2_inpaint import RBLNKandinskyV22InpaintPipeline
+from .pipeline_kandinsky2_2_prior import RBLNKandinskyV22PriorPipeline
optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py ADDED
@@ -0,0 +1,83 @@
+# Copyright 2024 Rebellions Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from diffusers import (
+    DDPMScheduler,
+    KandinskyV22InpaintCombinedPipeline,
+    PriorTransformer,
+    UnCLIPScheduler,
+    UNet2DConditionModel,
+    VQModel,
+)
+from transformers import (
+    CLIPImageProcessor,
+    CLIPTextModelWithProjection,
+    CLIPTokenizer,
+    CLIPVisionModelWithProjection,
+)
+
+from ...modeling_diffusers import RBLNDiffusionMixin
+from .pipeline_kandinsky2_2_inpaint import RBLNKandinskyV22InpaintPipeline
+from .pipeline_kandinsky2_2_prior import RBLNKandinskyV22PriorPipeline
+
+
+class RBLNKandinskyV22InpaintCombinedPipeline(RBLNDiffusionMixin, KandinskyV22InpaintCombinedPipeline):
+    original_class = KandinskyV22InpaintCombinedPipeline
+    _connected_classes = {"prior_pipe": RBLNKandinskyV22PriorPipeline, "decoder_pipe": RBLNKandinskyV22InpaintPipeline}
+    _submodules = ["prior_pipe", "decoder_pipe"]
+    _prefix = {"prior_pipe": "prior_"}
+
+    def __init__(
+        self,
+        unet: "UNet2DConditionModel",
+        scheduler: "DDPMScheduler",
+        movq: "VQModel",
+        prior_prior: "PriorTransformer",
+        prior_image_encoder: "CLIPVisionModelWithProjection",
+        prior_text_encoder: "CLIPTextModelWithProjection",
+        prior_tokenizer: "CLIPTokenizer",
+        prior_scheduler: "UnCLIPScheduler",
+        prior_image_processor: "CLIPImageProcessor",
+    ):
+        RBLNDiffusionMixin.__init__(self)
+        super(KandinskyV22InpaintCombinedPipeline, self).__init__()
+
+        self.register_modules(
+            unet=unet,
+            scheduler=scheduler,
+            movq=movq,
+            prior_prior=prior_prior,
+            prior_image_encoder=prior_image_encoder,
+            prior_text_encoder=prior_text_encoder,
+            prior_tokenizer=prior_tokenizer,
+            prior_scheduler=prior_scheduler,
+            prior_image_processor=prior_image_processor,
+        )
+
+        self.prior_pipe = RBLNKandinskyV22PriorPipeline(
+            prior=prior_prior,
+            image_encoder=prior_image_encoder,
+            text_encoder=prior_text_encoder,
+            tokenizer=prior_tokenizer,
+            scheduler=prior_scheduler,
+            image_processor=prior_image_processor,
+        )
+        self.decoder_pipe = RBLNKandinskyV22InpaintPipeline(
+            unet=unet,
+            scheduler=scheduler,
+            movq=movq,
+        )
+
+    def get_compiled_image_size(self):
+        return self.movq.image_size
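
A hedged end-to-end sketch for the combined pipeline (the model id, the `rbln_config` keyword on `from_pretrained`, and the placeholder images are assumptions; the prefixed `prior_prior` key follows the `_prefix` mapping above):

```python
from PIL import Image
from optimum.rbln import RBLNKandinskyV22InpaintCombinedPipeline

pipe = RBLNKandinskyV22InpaintCombinedPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder-inpaint",  # example id (assumed)
    export=True,
    rbln_config={
        "img_height": 512,
        "img_width": 512,
        "prior_prior": {"batch_size": 2},  # prefixed key reaches the prior pipe
    },
)

init_image = Image.new("RGB", (512, 512), "white")  # placeholder inputs
mask = Image.new("L", (512, 512), 0)
result = pipe(prompt="a red bench in a park", image=init_image, mask_image=mask)
result.images[0].save("out.png")
```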
optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py ADDED
@@ -0,0 +1,22 @@
+# Copyright 2024 Rebellions Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from diffusers import KandinskyV22InpaintPipeline
+
+from ...modeling_diffusers import RBLNDiffusionMixin
+
+
+class RBLNKandinskyV22InpaintPipeline(RBLNDiffusionMixin, KandinskyV22InpaintPipeline):
+    original_class = KandinskyV22InpaintPipeline
+    _submodules = ["unet", "movq"]
optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py ADDED
@@ -0,0 +1,22 @@
+# Copyright 2024 Rebellions Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from diffusers import KandinskyV22PriorPipeline
+
+from ...modeling_diffusers import RBLNDiffusionMixin
+
+
+class RBLNKandinskyV22PriorPipeline(RBLNDiffusionMixin, KandinskyV22PriorPipeline):
+    original_class = KandinskyV22PriorPipeline
+    _submodules = ["text_encoder", "image_encoder", "prior"]
optimum/rbln/transformers/__init__.py CHANGED
@@ -40,6 +40,7 @@ _import_structure = {
         "RBLNCLIPTextModel",
         "RBLNCLIPTextModelWithProjection",
         "RBLNCLIPVisionModel",
+        "RBLNCLIPVisionModelWithProjection",
         "RBLNDPTForDepthEstimation",
         "RBLNExaoneForCausalLM",
         "RBLNGemmaForCausalLM",
@@ -99,6 +100,7 @@ if TYPE_CHECKING:
         RBLNCLIPTextModel,
         RBLNCLIPTextModelWithProjection,
         RBLNCLIPVisionModel,
+        RBLNCLIPVisionModelWithProjection,
         RBLNDPTForDepthEstimation,
         RBLNExaoneForCausalLM,
         RBLNGemmaForCausalLM,
optimum/rbln/transformers/models/__init__.py CHANGED
@@ -34,7 +34,12 @@ _import_structure = {
     ],
     "bart": ["RBLNBartForConditionalGeneration", "RBLNBartModel"],
     "bert": ["RBLNBertModel", "RBLNBertForQuestionAnswering", "RBLNBertForMaskedLM"],
-    "clip": ["RBLNCLIPTextModel", "RBLNCLIPTextModelWithProjection", "RBLNCLIPVisionModel"],
+    "clip": [
+        "RBLNCLIPTextModel",
+        "RBLNCLIPTextModelWithProjection",
+        "RBLNCLIPVisionModel",
+        "RBLNCLIPVisionModelWithProjection",
+    ],
     "dpt": ["RBLNDPTForDepthEstimation"],
     "exaone": ["RBLNExaoneForCausalLM"],
     "gemma": ["RBLNGemmaForCausalLM"],
@@ -68,7 +73,12 @@ if TYPE_CHECKING:
     )
     from .bart import RBLNBartForConditionalGeneration, RBLNBartModel
     from .bert import RBLNBertForMaskedLM, RBLNBertForQuestionAnswering, RBLNBertModel
-    from .clip import RBLNCLIPTextModel, RBLNCLIPTextModelWithProjection, RBLNCLIPVisionModel
+    from .clip import (
+        RBLNCLIPTextModel,
+        RBLNCLIPTextModelWithProjection,
+        RBLNCLIPVisionModel,
+        RBLNCLIPVisionModelWithProjection,
+    )
     from .dpt import RBLNDPTForDepthEstimation
     from .exaone import RBLNExaoneForCausalLM
     from .gemma import RBLNGemmaForCausalLM
optimum/rbln/transformers/models/clip/__init__.py CHANGED
@@ -12,4 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .modeling_clip import RBLNCLIPTextModel, RBLNCLIPTextModelWithProjection, RBLNCLIPVisionModel
+from .modeling_clip import (
+    RBLNCLIPTextModel,
+    RBLNCLIPTextModelWithProjection,
+    RBLNCLIPVisionModel,
+    RBLNCLIPVisionModelWithProjection,
+)
optimum/rbln/transformers/models/clip/modeling_clip.py CHANGED
@@ -22,7 +22,7 @@ from transformers import (
     CLIPVisionModel,
 )
 from transformers.modeling_outputs import BaseModelOutputWithPooling
-from transformers.models.clip.modeling_clip import CLIPTextModelOutput
+from transformers.models.clip.modeling_clip import CLIPTextModelOutput, CLIPVisionModelOutput
 
 from ....diffusers.modeling_diffusers import RBLNDiffusionMixin
 from ....modeling import RBLNModel
@@ -116,6 +116,10 @@ class RBLNCLIPVisionModel(RBLNModel):
     def wrap_model_if_needed(cls, model: torch.nn.Module, rbln_config: RBLNConfig) -> torch.nn.Module:
         return _VisionEncoder(model).eval()
 
+    @classmethod
+    def update_rbln_config_using_pipe(cls, pipe: RBLNDiffusionMixin, rbln_config: Dict[str, Any]) -> Dict[str, Any]:
+        return rbln_config
+
     @classmethod
     def _get_rbln_config(
         cls,
@@ -179,3 +183,24 @@ class RBLNCLIPVisionModel(RBLNModel):
             pooler_output=output[1],
             hidden_states=output[2:],
         )
+
+
+class RBLNCLIPVisionModelWithProjection(RBLNCLIPVisionModel):
+    def forward(
+        self,
+        pixel_values: Optional[torch.FloatTensor] = None,
+        **kwargs,
+    ) -> Union[Tuple, CLIPVisionModelOutput]:
+        if len(kwargs) > 0 and any(kwargs.values()):
+            logger.warning(f"Currently, optimum-rbln does not support kwargs {kwargs.keys()} for {self.__class__}.")
+
+        output = super().forward(pixel_values)
+        image_embeds = output[0]
+        last_hidden_state = output[1]
+        hidden_states = output[2:]
+
+        return CLIPVisionModelOutput(
+            image_embeds=image_embeds,
+            last_hidden_state=last_hidden_state,
+            hidden_states=hidden_states,
+        )
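
A hedged sketch for the new `RBLNCLIPVisionModelWithProjection` (model id and input resolution are assumptions; the output fields follow the `CLIPVisionModelOutput` construction above):

```python
import torch
from optimum.rbln import RBLNCLIPVisionModelWithProjection

image_encoder = RBLNCLIPVisionModelWithProjection.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior",  # example id (assumed)
    subfolder="image_encoder",
    export=True,
)

pixel_values = torch.randn(1, 3, 224, 224)  # assumed CLIP input resolution
out = image_encoder(pixel_values)
print(out.image_embeds.shape)  # projected image embedding, e.g. (1, 1280)
```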
{optimum_rbln-0.2.1a5.dist-info → optimum_rbln-0.7.2rc0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: optimum-rbln
-Version: 0.2.1a5
+Version: 0.7.2rc0
 Summary: Optimum RBLN is the interface between the Hugging Face Transformers and Diffusers libraries and RBLN accelerators. It provides a set of tools enabling easy model loading and inference on single and multiple rbln device settings for different downstream tasks.
 Project-URL: Homepage, https://rebellions.ai
 Project-URL: Documentation, https://docs.rbln.ai
{optimum_rbln-0.2.1a5.dist-info → optimum_rbln-0.7.2rc0.dist-info}/RECORD CHANGED
@@ -1,26 +1,32 @@
-optimum/rbln/__init__.py,sha256=sLCjJu_MLZEKDOwHIlJP4u4GzGZx-1kqHTYGw5B4xDg,6096
-optimum/rbln/__version__.py,sha256=J4Eyn4HLzB0UpyosVo-P3LCDkB5knEOS6Nu24mnl5NA,413
+optimum/rbln/__init__.py,sha256=eHi15YM3989AcX52jka9rUmgAtlp1PHqMNwBEdOfuu8,6554
+optimum/rbln/__version__.py,sha256=OIstx-UVfreSh_0960pBWlg0VhVw0Sy7VpRV6nHTpqA,414
 optimum/rbln/modeling.py,sha256=REImAAKO82CqSNABR-9E1jJEsWch9amSOwOOQhFEYLY,8283
 optimum/rbln/modeling_base.py,sha256=fQ0bI1Bb6GJquRXftmSSN9K-TXLhFltZJ6C-2w43xMg,21193
 optimum/rbln/modeling_config.py,sha256=7104bxmrvKW4Q6XTruQayiIGl8GHDFmPkJ3cknMIInE,11335
-optimum/rbln/diffusers/__init__.py,sha256=68FTAMpbbMflm8qiSqfM5J2_gFb3iU3fng6AL0TG47A,2913
-optimum/rbln/diffusers/modeling_diffusers.py,sha256=E1x-iOKEJCUB6ml0RgtFEVPPk6J6pqEF-JTEyOZzOyc,14928
-optimum/rbln/diffusers/models/__init__.py,sha256=aSL5_yd-y8Q6DxNvfQ-yl-BUNyMzI1P6AikjQMKZzpI,1357
+optimum/rbln/diffusers/__init__.py,sha256=pOyoXv3-JRzTBSwPKbgLS9H6F2K9dJdReEmpGhcLQYU,3283
+optimum/rbln/diffusers/modeling_diffusers.py,sha256=t7kwC-NEw-Y01EK8Xi4bqlsUOfabkNrLkmEHfRCHKG0,20320
+optimum/rbln/diffusers/models/__init__.py,sha256=mkCvJyH1KcwrsUvYSq_bVC79oOfyqtBSFDyPS1_48wA,1478
 optimum/rbln/diffusers/models/controlnet.py,sha256=EM_HlzCdaZdnnK0oGpY2fQeigPqHhlwh4NHCzlmoumI,10512
-optimum/rbln/diffusers/models/autoencoders/__init__.py,sha256=nMfnwEwuOje-qKofAw-uOWUWcYV_YvnaN68IGfDdqHg,645
+optimum/rbln/diffusers/models/autoencoders/__init__.py,sha256=dg17ZTUsiqTcbIaEE4fqew9uRbao0diQ21PXvRKIqKg,679
 optimum/rbln/diffusers/models/autoencoders/autoencoder_kl.py,sha256=rCbC32bJnfXtsLdVvNVVHpRAkCYy6jeCSwIZ-JSReWk,9220
-optimum/rbln/diffusers/models/autoencoders/vae.py,sha256=A-F2TRJ2vL4gNXiMT_hRGeanIFKWxJ1QaKmYVp41rwI,2513
-optimum/rbln/diffusers/models/transformers/__init__.py,sha256=TEhARgQJx_NUZzI6M8gt3aWbdzmLHnM6FMSQd9M9zCk,654
+optimum/rbln/diffusers/models/autoencoders/vae.py,sha256=gB9HR7Bf7wpIXLv-Js4Pc3oyWRlqEe4cms4sI2AJicY,4380
+optimum/rbln/diffusers/models/autoencoders/vq_model.py,sha256=GunIau02_-lodYZBzd0ktJSNRT5axEFIZxSAfj2Mlyo,5974
+optimum/rbln/diffusers/models/transformers/__init__.py,sha256=V8rSR7WzHs-i8Cwb_MNxhY2NFbwPgxu24vGtkwl-6tk,706
+optimum/rbln/diffusers/models/transformers/prior_transformer.py,sha256=VG9cQo-_eppDvQSW1q1euAGBt1socUHetN_fIN2u1iU,6169
 optimum/rbln/diffusers/models/transformers/transformer_sd3.py,sha256=n_krmMgiRxWrG--567PNpk58EG_X7x7H4gidIkRvwjo,7308
 optimum/rbln/diffusers/models/unets/__init__.py,sha256=MaICuK9CWjgzejXy8y2NDrphuEq1rkzanF8u45k6O5I,655
-optimum/rbln/diffusers/models/unets/unet_2d_condition.py,sha256=Z0-eAZw1Gah24y6uOO5m9-GRruBppCSdV2NQZLNtBaI,14021
-optimum/rbln/diffusers/pipelines/__init__.py,sha256=i8AQJSoV9clLTill7wP5ECci6E7lC2gBaNuqfhYklZk,2469
+optimum/rbln/diffusers/models/unets/unet_2d_condition.py,sha256=xHnBzFrm7aNaolxrsotbjo9GkbNiNdTleXQoeqGLlhg,15540
+optimum/rbln/diffusers/pipelines/__init__.py,sha256=DAsM4eNks3hEY-bsUKSxRKmgwUWDGDlw82gfplSOdO8,2800
 optimum/rbln/diffusers/pipelines/controlnet/__init__.py,sha256=n1Ef22TSeax-kENi_d8K6wGGHSNEo9QkUeygELHgcao,983
 optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py,sha256=JWKtnZYBIfgmbAo0SLFIvHBQCv2BPSFNvpcdjG4GUOY,4113
 optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py,sha256=dGdw5cwJLS4CLv6IHskk5ZCcPgS7UDuHKbfOZ8ojNUs,35187
 optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py,sha256=7xCiXrH4ToCTHohVGFXqO7_f9G8HShYaHgZxoMZARkQ,33664
 optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py,sha256=Gzt2wg4dgFg0TV3Bu0cs8Xru3wVrxWUxxgciwZ-QKLE,44755
 optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py,sha256=RfwxNX_zQWFtvvFQJ5bt3qtHbdYdQV_3XLHm9WYCKOs,46084
+optimum/rbln/diffusers/pipelines/kandinsky2_2/__init__.py,sha256=YFqA76_XiMNxPwqotbHug2kd7jCbOXOu5NlxG2hbaVs,808
+optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py,sha256=9szfe1NvOr1mgDnSPZvBGq1b65RElUrqLVhuErY3Dmw,2962
+optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py,sha256=WxBbHAZSAKDSWhFerPvUlIhhWEsejW4NmhwmWX-_b54,856
+optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py,sha256=Mf7tzrXetwCgt7LuXfkX-CX1hltLgNZdwF9bHxAbDJM,874
 optimum/rbln/diffusers/pipelines/stable_diffusion/__init__.py,sha256=gz6CbP4T6w8XH3PIGRIJXTmKFsChJIkwcAEAsiR5Ydg,830
 optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py,sha256=DgRLzO9HxtgE1jICmHoHaqeVXM4Ih-5uo2JqNMAPMcc,876
 optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py,sha256=qU7wN2gFUjFImuB6CGDY2SC4aZw4VhaRKu92eA_Fa08,904
@@ -37,11 +43,11 @@ optimum/rbln/ops/__init__.py,sha256=MbAHevg59fXQOFgrnsFFZ97s8-YrgvYCYML_sgKEEfM,
 optimum/rbln/ops/attn.py,sha256=QYvSMg4ps_PenHwpLVhuYRoOAFvHIo19nY0ZEdj4nTE,9700
 optimum/rbln/ops/flash_attn.py,sha256=Zn5nkouY3kk6MBivQpPjgGh4oepjpi8F3tnTrmrNfpg,2304
 optimum/rbln/ops/kv_cache_update.py,sha256=9W4WCO1Dtfy0u5i978JJRa7uLbqrfR2lHuoPynb07fw,3143
-optimum/rbln/transformers/__init__.py,sha256=SdOjpa4Iufo6aOJPvjQwD_vz28dmmNV9AtF5Cz9ajLM,4167
+optimum/rbln/transformers/__init__.py,sha256=AGo3BqVIZrsOzYsQAnnQ25HCstTPBclrXbvvUxVMlqE,4255
 optimum/rbln/transformers/modeling_alias.py,sha256=yx7FnZQWAnrWzivaO5hI7T6i-fyLzt2tMIXG2oDNbPo,1657
 optimum/rbln/transformers/modeling_generic.py,sha256=SD7XjpjnCn-ejNAUWgkaaHV6Fv2Y6K-hbXEXXb9W_H4,18177
 optimum/rbln/transformers/modeling_rope_utils.py,sha256=3zwkhYUyTZhxCJUSmwCc88iiY1TppRWEY9ShwUqNB2k,14293
-optimum/rbln/transformers/models/__init__.py,sha256=wucrA1ybpDfNcrySwdVeK5PZEYl-3ONXJvGpHGTvteo,3683
+optimum/rbln/transformers/models/__init__.py,sha256=zGnYODR-_T65tv6jFjtC8l01LC4vjfm41bM4doCXRvY,3835
 optimum/rbln/transformers/models/auto/__init__.py,sha256=GvGbb3ZpMv-h6euXeZ42jSizoOfrL2O1uvpAnfKxYEo,1034
 optimum/rbln/transformers/models/auto/auto_factory.py,sha256=IK9jFrJ3EEzYQa9_aKpcp2TO68M5YGkA-HcfBVpA2QU,7027
 optimum/rbln/transformers/models/auto/modeling_auto.py,sha256=Un9qoqdy3dO8JBza_bTJF_6_fRVNM9QisihSgTRFI-o,3933
@@ -50,8 +56,8 @@ optimum/rbln/transformers/models/bart/bart_architecture.py,sha256=dTkgMpNkyh4vT_
 optimum/rbln/transformers/models/bart/modeling_bart.py,sha256=ADRbE-5N3xJ60AzzjJ4BZs_THmB71qs4XTr9iFqsEqE,5667
 optimum/rbln/transformers/models/bert/__init__.py,sha256=YVV7k_laU6yJBawZrgjIWjRmIF-Y4oQQHqyf8lsraQs,691
 optimum/rbln/transformers/models/bert/modeling_bert.py,sha256=-nv-sgmHkyHQIoQvF8-lXOJiL4eaa1pq8MpdN4uRi9M,4668
-optimum/rbln/transformers/models/clip/__init__.py,sha256=ssJqlEt318ti2QaEakGh_tO3Ap1VSPCVF-ymUuvjAJs,698
-optimum/rbln/transformers/models/clip/modeling_clip.py,sha256=E1QfVNq1sTCp7uvuha1ZPfXMwvMTkGV9L4oFdmy1w4g,5724
+optimum/rbln/transformers/models/clip/__init__.py,sha256=H9vuBwrmFO0-CqZhXUrKF-uQL6igCqMlqrT1X_ELaAI,754
+optimum/rbln/transformers/models/clip/modeling_clip.py,sha256=NiSm7bHs4SReHDUr53BBWSX0Y8bkKOeUSpsBDrp8YDw,6628
 optimum/rbln/transformers/models/decoderonly/__init__.py,sha256=pDogsdpJKKB5rqnVFrRjwfhUvOSV-jZ3oARMsqSvOOQ,665
 optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py,sha256=eT1fbKDL92yGBXtUKA_JibD4kiRPdf3tAFJHP5nlfH4,36646
 optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py,sha256=2OO8MEgFgcl1VPrQXxqkvmRJJEuFdexwu8XqbHDbR6Y,27609
@@ -108,7 +114,7 @@ optimum/rbln/utils/model_utils.py,sha256=DfD_Z2qvZHqcddXqnzTM1AN8khanj3-DXK2lJvV
 optimum/rbln/utils/runtime_utils.py,sha256=5-DYniyP59nx-mrrbi7AqA77L85b4Cm5oLpaxidSyss,3699
 optimum/rbln/utils/save_utils.py,sha256=hG5uOtYmecSXZuGTvCXsTM-SiyZpr5q3InUGCCq_jzQ,3619
 optimum/rbln/utils/submodule.py,sha256=oZoGrItB8WqY4i-K9WJPlLlcLohc1YGB9OHB8_XZw3A,4071
-optimum_rbln-0.2.1a5.dist-info/METADATA,sha256=WSMoEbo3z3TMFB1lqbdJsu4ZeVI9AtewXktRjMk6WQw,5300
-optimum_rbln-0.2.1a5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-optimum_rbln-0.2.1a5.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
-optimum_rbln-0.2.1a5.dist-info/RECORD,,
+optimum_rbln-0.7.2rc0.dist-info/METADATA,sha256=4lKtPSN8dCRMWCpiwizg0MxQtvlVfQnyWVIFhCfAMLs,5301
+optimum_rbln-0.7.2rc0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+optimum_rbln-0.7.2rc0.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+optimum_rbln-0.7.2rc0.dist-info/RECORD,,