optimum-rbln 0.1.13__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (103)
  1. optimum/rbln/__init__.py +41 -38
  2. optimum/rbln/__version__.py +16 -1
  3. optimum/rbln/diffusers/__init__.py +26 -2
  4. optimum/rbln/{modeling_diffusers.py → diffusers/modeling_diffusers.py} +97 -126
  5. optimum/rbln/diffusers/models/__init__.py +36 -3
  6. optimum/rbln/{transformers/generation → diffusers/models/autoencoders}/__init__.py +1 -2
  7. optimum/rbln/diffusers/models/{autoencoder_kl.py → autoencoders/autoencoder_kl.py} +73 -61
  8. optimum/rbln/diffusers/models/autoencoders/vae.py +83 -0
  9. optimum/rbln/diffusers/models/controlnet.py +54 -14
  10. optimum/rbln/diffusers/models/transformers/__init__.py +24 -0
  11. optimum/rbln/diffusers/models/transformers/transformer_sd3.py +203 -0
  12. optimum/rbln/diffusers/models/unets/__init__.py +24 -0
  13. optimum/rbln/diffusers/models/{unet_2d_condition.py → unets/unet_2d_condition.py} +82 -22
  14. optimum/rbln/diffusers/pipelines/__init__.py +23 -2
  15. optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +13 -33
  16. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +17 -2
  17. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +18 -2
  18. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +18 -2
  19. optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +18 -2
  20. optimum/rbln/diffusers/pipelines/stable_diffusion/__init__.py +1 -0
  21. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +2 -2
  22. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +1 -13
  23. optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +31 -0
  24. optimum/rbln/diffusers/pipelines/stable_diffusion_3/__init__.py +26 -0
  25. optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +31 -0
  26. optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +31 -0
  27. optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +31 -0
  28. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/__init__.py +24 -0
  29. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +15 -8
  30. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +15 -8
  31. optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +31 -0
  32. optimum/rbln/modeling.py +238 -0
  33. optimum/rbln/modeling_base.py +186 -760
  34. optimum/rbln/modeling_config.py +31 -7
  35. optimum/rbln/ops/__init__.py +26 -0
  36. optimum/rbln/ops/attn.py +221 -0
  37. optimum/rbln/ops/flash_attn.py +70 -0
  38. optimum/rbln/ops/kv_cache_update.py +69 -0
  39. optimum/rbln/transformers/__init__.py +20 -2
  40. optimum/rbln/{modeling_alias.py → transformers/modeling_alias.py} +5 -1
  41. optimum/rbln/transformers/modeling_generic.py +385 -0
  42. optimum/rbln/transformers/models/auto/__init__.py +23 -0
  43. optimum/rbln/transformers/models/auto/auto_factory.py +117 -23
  44. optimum/rbln/transformers/models/auto/modeling_auto.py +36 -12
  45. optimum/rbln/transformers/models/bart/__init__.py +0 -1
  46. optimum/rbln/transformers/models/bart/bart_architecture.py +107 -464
  47. optimum/rbln/transformers/models/bart/modeling_bart.py +10 -9
  48. optimum/rbln/transformers/models/bert/modeling_bert.py +3 -6
  49. optimum/rbln/transformers/models/clip/modeling_clip.py +8 -25
  50. optimum/rbln/transformers/models/decoderonly/__init__.py +0 -10
  51. optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +775 -514
  52. optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +128 -260
  53. optimum/rbln/transformers/models/dpt/modeling_dpt.py +1 -1
  54. optimum/rbln/transformers/models/exaone/exaone_architecture.py +60 -45
  55. optimum/rbln/transformers/models/exaone/modeling_exaone.py +4 -2
  56. optimum/rbln/transformers/models/gemma/gemma_architecture.py +33 -104
  57. optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +50 -238
  58. optimum/rbln/transformers/models/gpt2/modeling_gpt2.py +3 -2
  59. optimum/rbln/transformers/models/llama/llama_architecture.py +0 -1
  60. optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +3 -75
  61. optimum/rbln/transformers/models/midm/midm_architecture.py +84 -238
  62. optimum/rbln/transformers/models/midm/modeling_midm.py +5 -6
  63. optimum/rbln/transformers/models/mistral/mistral_architecture.py +0 -1
  64. optimum/rbln/transformers/models/phi/phi_architecture.py +60 -261
  65. optimum/rbln/transformers/models/qwen2/qwen2_architecture.py +0 -1
  66. optimum/rbln/transformers/models/seq2seq/modeling_seq2seq.py +58 -103
  67. optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py +498 -0
  68. optimum/rbln/transformers/models/t5/__init__.py +0 -1
  69. optimum/rbln/transformers/models/t5/modeling_t5.py +106 -5
  70. optimum/rbln/transformers/models/t5/t5_architecture.py +106 -448
  71. optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +1 -1
  72. optimum/rbln/transformers/models/whisper/generation_whisper.py +42 -0
  73. optimum/rbln/transformers/models/whisper/modeling_whisper.py +78 -55
  74. optimum/rbln/transformers/models/whisper/whisper_architecture.py +219 -312
  75. optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +3 -35
  76. optimum/rbln/transformers/utils/rbln_quantization.py +120 -4
  77. optimum/rbln/utils/decorator_utils.py +51 -11
  78. optimum/rbln/utils/hub.py +131 -0
  79. optimum/rbln/utils/import_utils.py +22 -1
  80. optimum/rbln/utils/logging.py +37 -0
  81. optimum/rbln/utils/model_utils.py +52 -0
  82. optimum/rbln/utils/runtime_utils.py +10 -4
  83. optimum/rbln/utils/save_utils.py +17 -0
  84. optimum/rbln/utils/submodule.py +137 -0
  85. optimum_rbln-0.2.0.dist-info/METADATA +117 -0
  86. optimum_rbln-0.2.0.dist-info/RECORD +114 -0
  87. {optimum_rbln-0.1.13.dist-info → optimum_rbln-0.2.0.dist-info}/WHEEL +1 -1
  88. optimum_rbln-0.2.0.dist-info/licenses/LICENSE +288 -0
  89. optimum/rbln/transformers/cache_utils.py +0 -107
  90. optimum/rbln/transformers/generation/streamers.py +0 -139
  91. optimum/rbln/transformers/generation/utils.py +0 -397
  92. optimum/rbln/transformers/models/exaone/hf_hub_cached/configuration_exaone.py +0 -181
  93. optimum/rbln/transformers/models/exaone/hf_hub_cached/modeling_exaone.py +0 -1725
  94. optimum/rbln/transformers/models/midm/hf_hub_cached/configuration_midm.py +0 -22
  95. optimum/rbln/transformers/models/midm/hf_hub_cached/midm_bitext_tokenization.py +0 -304
  96. optimum/rbln/transformers/models/midm/hf_hub_cached/modeling_midm.py +0 -1469
  97. optimum/rbln/transformers/models/midm/hf_hub_cached/rotary_position_embedding.py +0 -98
  98. optimum/rbln/utils/context.py +0 -58
  99. optimum/rbln/utils/timer_utils.py +0 -43
  100. optimum_rbln-0.1.13.dist-info/METADATA +0 -120
  101. optimum_rbln-0.1.13.dist-info/RECORD +0 -107
  102. optimum_rbln-0.1.13.dist-info/entry_points.txt +0 -4
  103. optimum_rbln-0.1.13.dist-info/licenses/LICENSE +0 -201

optimum/rbln/diffusers/models/{unet_2d_condition.py → unets/unet_2d_condition.py}

@@ -29,9 +29,9 @@ import torch
 from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
 from transformers import PretrainedConfig

-from ...modeling_base import RBLNModel
-from ...modeling_config import RBLNCompileConfig, RBLNConfig
-from ...utils.context import override_auto_classes
+from ....modeling import RBLNModel
+from ....modeling_config import RBLNCompileConfig, RBLNConfig
+from ...modeling_diffusers import RBLNDiffusionMixin


 if TYPE_CHECKING:
@@ -125,6 +125,9 @@ class _UNet_SDXL(torch.nn.Module):


 class RBLNUNet2DConditionModel(RBLNModel):
+    hf_library_name = "diffusers"
+    auto_model_class = UNet2DConditionModel
+
     def __post_init__(self, **kwargs):
         super().__post_init__(**kwargs)
         self.in_features = self.rbln_config.model_cfg.get("in_features", None)
@@ -140,15 +143,6 @@ class RBLNUNet2DConditionModel(RBLNModel):

         self.add_embedding = ADDEMBEDDING(LINEAR1(self.in_features))

-    @classmethod
-    def from_pretrained(cls, *args, **kwargs):
-        with override_auto_classes(
-            config_func=UNet2DConditionModel.load_config,
-            model_func=UNet2DConditionModel.from_pretrained,
-        ):
-            rt = super().from_pretrained(*args, **kwargs)
-        return rt
-
     @classmethod
     def wrap_model_if_needed(cls, model: torch.nn.Module, rbln_config: RBLNConfig) -> torch.nn.Module:
         if model.config.addition_embed_type == "text_time":
@@ -156,6 +150,61 @@ class RBLNUNet2DConditionModel(RBLNModel):
         else:
             return _UNet_SD(model).eval()

+    @classmethod
+    def get_unet_sample_size(
+        cls, pipe: RBLNDiffusionMixin, rbln_config: Dict[str, Any]
+    ) -> Union[int, Tuple[int, int]]:
+        image_size = (rbln_config.get("img_height"), rbln_config.get("img_width"))
+        if (image_size[0] is None) != (image_size[1] is None):
+            raise ValueError("Both image height and image width must be given or not given")
+        elif image_size[0] is None and image_size[1] is None:
+            if rbln_config["img2img_pipeline"]:
+                # In case of img2img, sample size of unet is determined by vae encoder.
+                vae_sample_size = pipe.vae.config.sample_size
+                if isinstance(vae_sample_size, int):
+                    sample_size = vae_sample_size // pipe.vae_scale_factor
+                else:
+                    sample_size = (
+                        vae_sample_size[0] // pipe.vae_scale_factor,
+                        vae_sample_size[1] // pipe.vae_scale_factor,
+                    )
+            else:
+                sample_size = pipe.unet.config.sample_size
+        else:
+            sample_size = (image_size[0] // pipe.vae_scale_factor, image_size[1] // pipe.vae_scale_factor)
+
+        return sample_size
+
+    @classmethod
+    def update_rbln_config_using_pipe(cls, pipe: RBLNDiffusionMixin, rbln_config: Dict[str, Any]) -> Dict[str, Any]:
+        text_model_hidden_size = pipe.text_encoder_2.config.hidden_size if hasattr(pipe, "text_encoder_2") else None
+
+        batch_size = rbln_config.get("batch_size")
+        if not batch_size:
+            do_classifier_free_guidance = (
+                rbln_config.get("guidance_scale", 5.0) > 1.0 and pipe.unet.config.time_cond_proj_dim is None
+            )
+            batch_size = 2 if do_classifier_free_guidance else 1
+        else:
+            if rbln_config.get("guidance_scale"):
+                logger.warning(
+                    "guidance_scale is ignored because batch size is explicitly specified. "
+                    "To ensure consistent behavior, consider removing the guidance scale or "
+                    "adjusting the batch size configuration as needed."
+                )
+
+        rbln_config.update(
+            {
+                "max_seq_len": pipe.text_encoder.config.max_position_embeddings,
+                "text_model_hidden_size": text_model_hidden_size,
+                "sample_size": cls.get_unet_sample_size(pipe, rbln_config),
+                "batch_size": batch_size,
+                "is_controlnet": "controlnet" in pipe.config.keys(),
+            }
+        )
+
+        return rbln_config
+
     @classmethod
     def _get_rbln_config(
         cls,
@@ -179,7 +228,7 @@ class RBLNUNet2DConditionModel(RBLNModel):
             sample_size = (sample_size, sample_size)

         if max_seq_len is None:
-            raise ValueError("`rbln_max_seq_len` (ex. text_encoder's max_position_embeddings )must be specified")
+            raise ValueError("`rbln_max_seq_len` (ex. text_encoder's max_position_embeddings) must be specified.")

         input_info = [
             ("sample", [batch_size, model_config.in_channels, sample_size[0], sample_size[1]], "float32"),
@@ -216,15 +265,13 @@ class RBLNUNet2DConditionModel(RBLNModel):
             ]
             input_info.append(("mid_block_additional_residual", shape, "float32"))

-        rbln_compile_config = RBLNCompileConfig(input_info=input_info)
-
         if hasattr(model_config, "addition_embed_type") and model_config.addition_embed_type == "text_time":
             rbln_text_model_hidden_size = rbln_kwargs["text_model_hidden_size"]
             rbln_in_features = model_config.projection_class_embeddings_input_dim
-            rbln_compile_config.input_info.append(
-                ("text_embeds", [batch_size, rbln_text_model_hidden_size], "float32")
-            )
-            rbln_compile_config.input_info.append(("time_ids", [batch_size, 6], "float32"))
+            input_info.append(("text_embeds", [batch_size, rbln_text_model_hidden_size], "float32"))
+            input_info.append(("time_ids", [batch_size, 6], "float32"))
+
+        rbln_compile_config = RBLNCompileConfig(input_info=input_info)

         rbln_config = RBLNConfig(
             rbln_cls=cls.__name__,
@@ -237,6 +284,10 @@ class RBLNUNet2DConditionModel(RBLNModel):

         return rbln_config

+    @property
+    def compiled_batch_size(self):
+        return self.rbln_config.compile_cfgs[0].input_info[0][1][0]
+
     def forward(
         self,
         sample: torch.Tensor,
@@ -254,9 +305,18 @@ class RBLNUNet2DConditionModel(RBLNModel):
         return_dict: bool = True,
         **kwargs,
     ):
-        """
-        arg order : latent_model_input, t, prompt_embeds
-        """
+        sample_batch_size = sample.size()[0]
+        compiled_batch_size = self.compiled_batch_size
+        if sample_batch_size != compiled_batch_size and (
+            sample_batch_size * 2 == compiled_batch_size or sample_batch_size == compiled_batch_size * 2
+        ):
+            raise ValueError(
+                f"Mismatch between UNet's runtime batch size ({sample_batch_size}) and compiled batch size ({compiled_batch_size}). "
+                "This may be caused by the 'guidance scale' parameter, which doubles the runtime batch size in Stable Diffusion. "
+                "Adjust the batch size during compilation or modify the 'guidance scale' to match the compiled batch size.\n\n"
+                "For details, see: https://docs.rbln.ai/software/optimum/model_api.html#stable-diffusion"
+            )
+
         added_cond_kwargs = {} if added_cond_kwargs is None else added_cond_kwargs

         if down_block_additional_residuals is not None:
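
Note: the batch-size selection added in `update_rbln_config_using_pipe` and the new runtime check in `forward` work together: when no batch size is given, classifier-free guidance doubles the compiled UNet batch, and a doubled or halved batch at runtime now raises the error shown above. A minimal standalone sketch of that selection logic follows; the helper name and defaults are illustrative, not part of the package API.

from typing import Optional


def pick_unet_batch_size(
    requested_batch_size: Optional[int],
    guidance_scale: float = 5.0,
    time_cond_proj_dim: Optional[int] = None,
) -> int:
    # Hedged sketch mirroring the batch-size selection shown in the diff above;
    # pick_unet_batch_size is an illustrative name, not an optimum-rbln API.
    if not requested_batch_size:
        # No explicit batch size: classifier-free guidance doubles the UNet batch.
        do_classifier_free_guidance = guidance_scale > 1.0 and time_cond_proj_dim is None
        return 2 if do_classifier_free_guidance else 1
    # An explicit batch size always wins; guidance_scale is then ignored (with a warning upstream).
    return requested_batch_size


assert pick_unet_batch_size(None) == 2                      # CFG active by default
assert pick_unet_batch_size(None, guidance_scale=1.0) == 1  # CFG disabled
assert pick_unet_batch_size(4) == 4                         # explicit value kept as-is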

optimum/rbln/diffusers/pipelines/__init__.py

@@ -20,6 +20,7 @@
 # are the intellectual property of Rebellions Inc. and may not be
 # copied, modified, or distributed without prior written permission
 # from Rebellions Inc.
+
 from typing import TYPE_CHECKING

 from transformers.utils import _LazyModule
@@ -36,8 +37,18 @@ _import_structure = {
     "stable_diffusion": [
         "RBLNStableDiffusionImg2ImgPipeline",
         "RBLNStableDiffusionPipeline",
+        "RBLNStableDiffusionInpaintPipeline",
+    ],
+    "stable_diffusion_xl": [
+        "RBLNStableDiffusionXLImg2ImgPipeline",
+        "RBLNStableDiffusionXLPipeline",
+        "RBLNStableDiffusionXLInpaintPipeline",
+    ],
+    "stable_diffusion_3": [
+        "RBLNStableDiffusion3Pipeline",
+        "RBLNStableDiffusion3Img2ImgPipeline",
+        "RBLNStableDiffusion3InpaintPipeline",
     ],
-    "stable_diffusion_xl": ["RBLNStableDiffusionXLImg2ImgPipeline", "RBLNStableDiffusionXLPipeline"],
 }
 if TYPE_CHECKING:
     from .controlnet import (
@@ -49,9 +60,19 @@ if TYPE_CHECKING:
     )
     from .stable_diffusion import (
         RBLNStableDiffusionImg2ImgPipeline,
+        RBLNStableDiffusionInpaintPipeline,
         RBLNStableDiffusionPipeline,
     )
-    from .stable_diffusion_xl import RBLNStableDiffusionXLImg2ImgPipeline, RBLNStableDiffusionXLPipeline
+    from .stable_diffusion_3 import (
+        RBLNStableDiffusion3Img2ImgPipeline,
+        RBLNStableDiffusion3InpaintPipeline,
+        RBLNStableDiffusion3Pipeline,
+    )
+    from .stable_diffusion_xl import (
+        RBLNStableDiffusionXLImg2ImgPipeline,
+        RBLNStableDiffusionXLInpaintPipeline,
+        RBLNStableDiffusionXLPipeline,
+    )
 else:
     import sys


optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py

@@ -27,13 +27,9 @@ from pathlib import Path
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union

 import torch
-from diffusers import ControlNetModel
 from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
-from optimum.exporters import TasksManager
-from transformers import AutoConfig, AutoModel

-from ....modeling_base import RBLNModel
-from ....modeling_config import RBLNConfig
+from ....modeling import RBLNModel
 from ...models.controlnet import RBLNControlNetModel


@@ -44,10 +40,12 @@ logger = logging.getLogger(__name__)


 class RBLNMultiControlNetModel(RBLNModel):
+    hf_library_name = "diffusers"
+    _hf_class = MultiControlNetModel
+
     def __init__(
         self,
         models: List[RBLNControlNetModel],
-        **kwargs,
     ):
         self.nets = models
         self.dtype = torch.float32
@@ -59,27 +57,6 @@ class RBLNMultiControlNetModel(RBLNModel):
             cm.extend(net.compiled_models)
         return cm

-    @classmethod
-    def from_pretrained(cls, *args, **kwargs):
-        def get_model_from_task(
-            task: str,
-            model_name_or_path: Union[str, Path],
-            **kwargs,
-        ):
-            return MultiControlNetModel.from_pretrained(pretrained_model_name_or_path=model_name_or_path, **kwargs)
-
-        tasktmp = TasksManager.get_model_from_task
-        configtmp = AutoConfig.from_pretrained
-        modeltmp = AutoModel.from_pretrained
-        TasksManager.get_model_from_task = get_model_from_task
-        AutoConfig.from_pretrained = ControlNetModel.load_config
-        AutoModel.from_pretrained = MultiControlNetModel.from_pretrained
-        rt = super().from_pretrained(*args, **kwargs)
-        AutoConfig.from_pretrained = configtmp
-        AutoModel.from_pretrained = modeltmp
-        TasksManager.get_model_from_task = tasktmp
-        return rt
-
     @classmethod
     def _from_pretrained(
         cls,
@@ -88,19 +65,22 @@ class RBLNMultiControlNetModel(RBLNModel):
     ) -> RBLNModel:
         idx = 0
         controlnets = []
-        model_path_to_load = model_id
+        subfolder_name = kwargs.pop("subfolder", None)
+        if subfolder_name is not None:
+            model_path_to_load = model_id + "/" + subfolder_name
+        else:
+            model_path_to_load = model_id
+
+        base_model_path_to_load = model_path_to_load

         while os.path.isdir(model_path_to_load):
             controlnet = RBLNControlNetModel.from_pretrained(model_path_to_load, export=False, **kwargs)
             controlnets.append(controlnet)
-            rbln_config = RBLNConfig.load(model_path_to_load)
             idx += 1
-            model_path_to_load = model_id + f"_{idx}"
+            model_path_to_load = base_model_path_to_load + f"_{idx}"

         return cls(
             controlnets,
-            rbln_config=rbln_config,
-            **kwargs,
         )

     def save_pretrained(self, save_directory: Union[str, Path], **kwargs):
@@ -118,7 +98,7 @@ class RBLNMultiControlNetModel(RBLNModel):
         sample: torch.FloatTensor,
         timestep: Union[torch.Tensor, float, int],
         encoder_hidden_states: torch.Tensor,
-        controlnet_cond: List[torch.tensor],
+        controlnet_cond: List[torch.Tensor],
         conditioning_scale: List[float],
         class_labels: Optional[torch.Tensor] = None,
         timestep_cond: Optional[torch.Tensor] = None,
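
Note: the rewritten `_from_pretrained` above discovers compiled ControlNets by directory name. It starts from `model_id` (optionally joined with a `subfolder`) and keeps loading `<path>_1`, `<path>_2`, and so on while such directories exist. A minimal sketch of that lookup under an assumed on-disk layout; the paths and helper name are illustrative only.

import os

# Assumed layout written when saving a multi-ControlNet pipeline:
#   my_pipeline/controlnet      -> first compiled RBLNControlNetModel
#   my_pipeline/controlnet_1    -> second controlnet, suffixed with its index
#   my_pipeline/controlnet_2    -> and so on
def list_controlnet_dirs(model_id: str, subfolder: str = "controlnet") -> list:
    """Collect controlnet directories the same way the new _from_pretrained loop does."""
    base = os.path.join(model_id, subfolder)
    paths, idx, path = [], 0, base
    while os.path.isdir(path):
        paths.append(path)
        idx += 1
        path = f"{base}_{idx}"
    return paths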

optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py

@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # Copyright 2024 Rebellions Inc.

 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,7 +34,6 @@
 # are the intellectual property of Rebellions Inc. and may not be
 # copied, modified, or distributed without prior written permission
 # from Rebellions Inc.
-"""RBLNStableDiffusionPipeline class for inference of diffusion models on rbln devices."""

 from typing import Any, Callable, Dict, List, Optional, Union

@@ -33,8 +46,8 @@ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
 from diffusers.utils import deprecate, logging
 from diffusers.utils.torch_utils import is_compiled_module, is_torch_version

-from ....modeling_diffusers import RBLNDiffusionMixin
 from ....utils.decorator_utils import remove_compile_time_kwargs
+from ...modeling_diffusers import RBLNDiffusionMixin
 from ...models import RBLNControlNetModel
 from ...pipelines.controlnet.multicontrolnet import RBLNMultiControlNetModel

@@ -46,6 +59,7 @@ class RBLNStableDiffusionControlNetPipeline(RBLNDiffusionMixin, StableDiffusionControlNetPipeline):
     original_class = StableDiffusionControlNetPipeline
     _submodules = ["text_encoder", "unet", "vae", "controlnet"]

+    # Almost copied from diffusers.pipelines.controlnet.pipeline_controlnet.py
     def check_inputs(
         self,
         prompt,
@@ -209,6 +223,7 @@ class RBLNStableDiffusionControlNetPipeline(RBLNDiffusionMixin, StableDiffusionControlNetPipeline):
                 f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
             )

+    # Almost copied from diffusers.pipelines.controlnet.pipeline_controlnet.py
     @torch.no_grad()
     @remove_compile_time_kwargs
     def __call__(

optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py

@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # Copyright 2024 Rebellions Inc.

 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,7 +34,6 @@
 # are the intellectual property of Rebellions Inc. and may not be
 # copied, modified, or distributed without prior written permission
 # from Rebellions Inc.
-"""RBLNStableDiffusionPipeline class for inference of diffusion models on rbln devices."""

 from typing import Any, Callable, Dict, List, Optional, Union

@@ -32,8 +45,8 @@ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
 from diffusers.utils import deprecate, logging
 from diffusers.utils.torch_utils import is_compiled_module

-from ....modeling_diffusers import RBLNDiffusionMixin
 from ....utils.decorator_utils import remove_compile_time_kwargs
+from ...modeling_diffusers import RBLNDiffusionMixin
 from ...models import RBLNControlNetModel
 from ...pipelines.controlnet.multicontrolnet import RBLNMultiControlNetModel

@@ -42,8 +55,10 @@ logger = logging.get_logger(__name__)


 class RBLNStableDiffusionControlNetImg2ImgPipeline(RBLNDiffusionMixin, StableDiffusionControlNetImg2ImgPipeline):
+    original_class = StableDiffusionControlNetImg2ImgPipeline
     _submodules = ["text_encoder", "unet", "vae", "controlnet"]

+    # Almost copied from diffusers.pipelines.controlnet.pipeline_controlnet_img2img.py
     def check_inputs(
         self,
         prompt,
@@ -201,6 +216,7 @@ class RBLNStableDiffusionControlNetImg2ImgPipeline(RBLNDiffusionMixin, StableDiffusionControlNetImg2ImgPipeline):
                 f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
             )

+    # Almost copied from diffusers.pipelines.controlnet.pipeline_controlnet_img2img.py
     @torch.no_grad()
     @remove_compile_time_kwargs
     def __call__(

optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py

@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # Copyright 2024 Rebellions Inc.

 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,7 +34,6 @@
 # are the intellectual property of Rebellions Inc. and may not be
 # copied, modified, or distributed without prior written permission
 # from Rebellions Inc.
-"""RBLNStableDiffusionXLPipeline class for inference of diffusion models on rbln devices."""

 from typing import Any, Callable, Dict, List, Optional, Tuple, Union

@@ -32,8 +45,8 @@ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
 from diffusers.utils import deprecate, logging
 from diffusers.utils.torch_utils import is_compiled_module, is_torch_version

-from ....modeling_diffusers import RBLNDiffusionMixin
 from ....utils.decorator_utils import remove_compile_time_kwargs
+from ...modeling_diffusers import RBLNDiffusionMixin
 from ...models import RBLNControlNetModel
 from ...pipelines.controlnet.multicontrolnet import RBLNMultiControlNetModel

@@ -42,8 +55,10 @@ logger = logging.get_logger(__name__)


 class RBLNStableDiffusionXLControlNetPipeline(RBLNDiffusionMixin, StableDiffusionXLControlNetPipeline):
+    original_class = StableDiffusionXLControlNetPipeline
     _submodules = ["text_encoder", "text_encoder_2", "unet", "vae", "controlnet"]

+    # Almost copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.py
     def check_inputs(
         self,
         prompt,
@@ -233,6 +248,7 @@ class RBLNStableDiffusionXLControlNetPipeline(RBLNDiffusionMixin, StableDiffusionXLControlNetPipeline):
                 f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
             )

+    # Almost copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl.py
     @torch.no_grad()
     @remove_compile_time_kwargs
     def __call__(

optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py

@@ -1,3 +1,17 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # Copyright 2024 Rebellions Inc.

 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,7 +34,6 @@
 # are the intellectual property of Rebellions Inc. and may not be
 # copied, modified, or distributed without prior written permission
 # from Rebellions Inc.
-"""RBLNStableDiffusionXLPipeline class for inference of diffusion models on rbln devices."""

 from typing import Any, Callable, Dict, List, Optional, Tuple, Union

@@ -32,8 +45,8 @@ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
 from diffusers.utils import deprecate, logging
 from diffusers.utils.torch_utils import is_compiled_module

-from ....modeling_diffusers import RBLNDiffusionMixin
 from ....utils.decorator_utils import remove_compile_time_kwargs
+from ...modeling_diffusers import RBLNDiffusionMixin
 from ...models import RBLNControlNetModel
 from ...pipelines.controlnet.multicontrolnet import RBLNMultiControlNetModel

@@ -42,8 +55,10 @@ logger = logging.get_logger(__name__)


 class RBLNStableDiffusionXLControlNetImg2ImgPipeline(RBLNDiffusionMixin, StableDiffusionXLControlNetImg2ImgPipeline):
+    original_class = StableDiffusionXLControlNetImg2ImgPipeline
     _submodules = ["text_encoder", "text_encoder_2", "unet", "vae", "controlnet"]

+    # Almost copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl_img2img.py
     def check_inputs(
         self,
         prompt,
@@ -245,6 +260,7 @@ class RBLNStableDiffusionXLControlNetImg2ImgPipeline(RBLNDiffusionMixin, StableDiffusionXLControlNetImg2ImgPipeline):
                 f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
            )

+    # Almost copied from diffusers.pipelines.controlnet.pipeline_controlnet_sd_xl_img2img.py
     @torch.no_grad()
     @remove_compile_time_kwargs
     def __call__(

optimum/rbln/diffusers/pipelines/stable_diffusion/__init__.py

@@ -23,3 +23,4 @@

 from .pipeline_stable_diffusion import RBLNStableDiffusionPipeline
 from .pipeline_stable_diffusion_img2img import RBLNStableDiffusionImg2ImgPipeline
+from .pipeline_stable_diffusion_inpaint import RBLNStableDiffusionInpaintPipeline

optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py

@@ -20,12 +20,12 @@
 # are the intellectual property of Rebellions Inc. and may not be
 # copied, modified, or distributed without prior written permission
 # from Rebellions Inc.
-"""RBLNStableDiffusionPipeline class for inference of diffusion models on rbln devices."""

 from diffusers import StableDiffusionPipeline

-from ....modeling_diffusers import RBLNDiffusionMixin
+from ...modeling_diffusers import RBLNDiffusionMixin


 class RBLNStableDiffusionPipeline(RBLNDiffusionMixin, StableDiffusionPipeline):
+    original_class = StableDiffusionPipeline
     _submodules = ["text_encoder", "unet", "vae"]

optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py

@@ -20,24 +20,12 @@
 # are the intellectual property of Rebellions Inc. and may not be
 # copied, modified, or distributed without prior written permission
 # from Rebellions Inc.
-"""RBLNStableDiffusionPipeline class for inference of diffusion models on rbln devices."""

 from diffusers import StableDiffusionImg2ImgPipeline

-from ....modeling_diffusers import RBLNDiffusionMixin
+from ...modeling_diffusers import RBLNDiffusionMixin


 class RBLNStableDiffusionImg2ImgPipeline(RBLNDiffusionMixin, StableDiffusionImg2ImgPipeline):
-    """
-    Pipeline for image-to-image generation using Stable Diffusion.
-
-    This model inherits from [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods
-    implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
-    It implements the methods to convert a pre-trained Stable Diffusion pipeline into a RBLNStableDiffusion pipeline by:
-    - transferring the checkpoint weights of the original into an optimized RBLN graph,
-    - compiling the resulting graph using the RBLN compiler.
-    """
-
     original_class = StableDiffusionImg2ImgPipeline
     _submodules = ["text_encoder", "unet", "vae"]

optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py

@@ -0,0 +1,31 @@
+# Copyright 2024 Rebellions Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Portions of this software are licensed under the Apache License,
+# Version 2.0. See the NOTICE file distributed with this work for
+# additional information regarding copyright ownership.
+
+# All other portions of this software, including proprietary code,
+# are the intellectual property of Rebellions Inc. and may not be
+# copied, modified, or distributed without prior written permission
+# from Rebellions Inc.
+
+from diffusers import StableDiffusionInpaintPipeline
+
+from ...modeling_diffusers import RBLNDiffusionMixin
+
+
+class RBLNStableDiffusionInpaintPipeline(RBLNDiffusionMixin, StableDiffusionInpaintPipeline):
+    original_class = StableDiffusionInpaintPipeline
+    _submodules = ["text_encoder", "unet", "vae"]

optimum/rbln/diffusers/pipelines/stable_diffusion_3/__init__.py

@@ -0,0 +1,26 @@
+# Copyright 2024 Rebellions Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Portions of this software are licensed under the Apache License,
+# Version 2.0. See the NOTICE file distributed with this work for
+# additional information regarding copyright ownership.
+
+# All other portions of this software, including proprietary code,
+# are the intellectual property of Rebellions Inc. and may not be
+# copied, modified, or distributed without prior written permission
+# from Rebellions Inc.
+
+from .pipeline_stable_diffusion_3 import RBLNStableDiffusion3Pipeline
+from .pipeline_stable_diffusion_3_img2img import RBLNStableDiffusion3Img2ImgPipeline
+from .pipeline_stable_diffusion_3_inpaint import RBLNStableDiffusion3InpaintPipeline

optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py

@@ -0,0 +1,31 @@
+# Copyright 2024 Rebellions Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Portions of this software are licensed under the Apache License,
+# Version 2.0. See the NOTICE file distributed with this work for
+# additional information regarding copyright ownership.
+
+# All other portions of this software, including proprietary code,
+# are the intellectual property of Rebellions Inc. and may not be
+# copied, modified, or distributed without prior written permission
+# from Rebellions Inc.
+
+from diffusers import StableDiffusion3Pipeline
+
+from ...modeling_diffusers import RBLNDiffusionMixin
+
+
+class RBLNStableDiffusion3Pipeline(RBLNDiffusionMixin, StableDiffusion3Pipeline):
+    original_class = StableDiffusion3Pipeline
+    _submodules = ["transformer", "text_encoder_3", "text_encoder", "text_encoder_2", "vae"]
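
For context, each new pipeline class follows the same pattern: mix `RBLNDiffusionMixin` into the corresponding diffusers pipeline, point `original_class` at it, and list the submodules to compile (for SD3 that is the `transformer` plus three text encoders and the VAE, rather than a UNet). A hedged usage sketch follows; the checkpoint id, the top-level import path, and the `export=True` compile flag are assumptions for illustration, not taken from this diff.

from optimum.rbln import RBLNStableDiffusion3Pipeline  # assumed top-level re-export

# Compile the SD3 submodules (transformer, text_encoder/2/3, vae) for RBLN NPUs,
# then save the compiled pipeline so it can be reloaded later with export=False.
pipe = RBLNStableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",  # illustrative checkpoint id
    export=True,
)
pipe.save_pretrained("sd3-rbln")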