optimum-rbln 0.1.12__py3-none-any.whl → 0.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- optimum/rbln/__init__.py +27 -13
- optimum/rbln/__version__.py +16 -1
- optimum/rbln/diffusers/__init__.py +22 -2
- optimum/rbln/diffusers/models/__init__.py +34 -3
- optimum/rbln/{transformers/generation → diffusers/models/autoencoders}/__init__.py +1 -2
- optimum/rbln/diffusers/models/{autoencoder_kl.py → autoencoders/autoencoder_kl.py} +66 -111
- optimum/rbln/diffusers/models/autoencoders/vae.py +84 -0
- optimum/rbln/diffusers/models/controlnet.py +85 -65
- optimum/rbln/diffusers/models/transformers/__init__.py +24 -0
- optimum/rbln/diffusers/models/transformers/transformer_sd3.py +203 -0
- optimum/rbln/diffusers/models/unets/__init__.py +24 -0
- optimum/rbln/diffusers/models/{unet_2d_condition.py → unets/unet_2d_condition.py} +129 -163
- optimum/rbln/diffusers/pipelines/__init__.py +60 -12
- optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +11 -25
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +9 -185
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +9 -190
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +9 -191
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +9 -192
- optimum/rbln/diffusers/pipelines/stable_diffusion/__init__.py +1 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +4 -110
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +4 -118
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +32 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/__init__.py +26 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +32 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +32 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +32 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/__init__.py +1 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +18 -128
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +18 -131
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +32 -0
- optimum/rbln/modeling.py +572 -0
- optimum/rbln/modeling_alias.py +1 -1
- optimum/rbln/modeling_base.py +176 -763
- optimum/rbln/modeling_diffusers.py +329 -0
- optimum/rbln/transformers/__init__.py +2 -2
- optimum/rbln/transformers/cache_utils.py +5 -9
- optimum/rbln/transformers/modeling_rope_utils.py +283 -0
- optimum/rbln/transformers/models/__init__.py +80 -31
- optimum/rbln/transformers/models/auto/auto_factory.py +117 -23
- optimum/rbln/transformers/models/auto/modeling_auto.py +37 -12
- optimum/rbln/transformers/models/bart/modeling_bart.py +3 -6
- optimum/rbln/transformers/models/bert/modeling_bert.py +3 -6
- optimum/rbln/transformers/models/clip/modeling_clip.py +8 -34
- optimum/rbln/transformers/models/decoderonly/__init__.py +0 -5
- optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +779 -361
- optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +83 -142
- optimum/rbln/transformers/models/dpt/modeling_dpt.py +1 -1
- optimum/rbln/transformers/models/exaone/exaone_architecture.py +64 -39
- optimum/rbln/transformers/models/exaone/modeling_exaone.py +6 -29
- optimum/rbln/transformers/models/gemma/gemma_architecture.py +31 -92
- optimum/rbln/transformers/models/gemma/modeling_gemma.py +4 -28
- optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +50 -238
- optimum/rbln/transformers/models/gpt2/modeling_gpt2.py +6 -31
- optimum/rbln/transformers/models/llama/modeling_llama.py +4 -28
- optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +29 -83
- optimum/rbln/transformers/models/midm/midm_architecture.py +88 -253
- optimum/rbln/transformers/models/midm/modeling_midm.py +8 -33
- optimum/rbln/transformers/models/mistral/modeling_mistral.py +4 -29
- optimum/rbln/transformers/models/phi/modeling_phi.py +5 -31
- optimum/rbln/transformers/models/phi/phi_architecture.py +61 -345
- optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +5 -29
- optimum/rbln/transformers/models/seq2seq/modeling_seq2seq.py +1 -46
- optimum/rbln/transformers/models/t5/__init__.py +1 -1
- optimum/rbln/transformers/models/t5/modeling_t5.py +157 -6
- optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +1 -1
- optimum/rbln/transformers/models/whisper/modeling_whisper.py +2 -2
- optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +3 -35
- optimum/rbln/transformers/utils/rbln_quantization.py +128 -5
- optimum/rbln/utils/decorator_utils.py +59 -0
- optimum/rbln/utils/hub.py +131 -0
- optimum/rbln/utils/import_utils.py +21 -0
- optimum/rbln/utils/model_utils.py +53 -0
- optimum/rbln/utils/runtime_utils.py +5 -5
- optimum/rbln/utils/submodule.py +114 -0
- optimum/rbln/utils/timer_utils.py +2 -2
- optimum_rbln-0.1.15.dist-info/METADATA +106 -0
- optimum_rbln-0.1.15.dist-info/RECORD +110 -0
- {optimum_rbln-0.1.12.dist-info → optimum_rbln-0.1.15.dist-info}/WHEEL +1 -1
- optimum/rbln/transformers/generation/streamers.py +0 -139
- optimum/rbln/transformers/generation/utils.py +0 -397
- optimum/rbln/transformers/models/exaone/hf_hub_cached/configuration_exaone.py +0 -181
- optimum/rbln/transformers/models/exaone/hf_hub_cached/modeling_exaone.py +0 -1725
- optimum/rbln/transformers/models/midm/hf_hub_cached/configuration_midm.py +0 -22
- optimum/rbln/transformers/models/midm/hf_hub_cached/midm_bitext_tokenization.py +0 -304
- optimum/rbln/transformers/models/midm/hf_hub_cached/modeling_midm.py +0 -1469
- optimum/rbln/transformers/models/midm/hf_hub_cached/rotary_position_embedding.py +0 -98
- optimum_rbln-0.1.12.dist-info/METADATA +0 -119
- optimum_rbln-0.1.12.dist-info/RECORD +0 -103
- optimum_rbln-0.1.12.dist-info/entry_points.txt +0 -4
- {optimum_rbln-0.1.12.dist-info → optimum_rbln-0.1.15.dist-info}/licenses/LICENSE +0 -0
@@ -1,142 +1,32 @@
-#
-
-#
+# Copyright 2024 Rebellions Inc.
+
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
+# You may obtain a copy of the License at:
+
 #     http://www.apache.org/licenses/LICENSE-2.0
-
+
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""RBLNStableDiffusionXLPipeline class for inference of diffusion models on rbln devices."""
-
-from diffusers import StableDiffusionXLPipeline
-
-from ....modeling_config import use_rbln_config
-from ....transformers import RBLNCLIPTextModel, RBLNCLIPTextModelWithProjection
-from ....utils.runtime_utils import ContextRblnConfig
-from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
-
-
-class RBLNStableDiffusionXLPipeline(StableDiffusionXLPipeline):
-    @classmethod
-    @use_rbln_config
-    def from_pretrained(cls, model_id, **kwargs):
-        """
-        Pipeline for text-to-image generation using Stable Diffusion XL.
-
-        This model inherits from [`StableDiffusionXLPipeline`]. Check the superclass documentation for the generic methods the
-        library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-        It implements the methods to convert a pre-trained StableDiffusionXL pipeline into a RBLNStableDiffusionXL pipeline by:
-        - transferring the checkpoint weights of the original into an optimized RBLN graph,
-        - compiling the resulting graph using the RBLN compiler.
 
-
-
-
-        - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
-        - A path to a *directory* containing a model saved using [`~OptimizedModel.save_pretrained`],
-        """
-        export = kwargs.pop("export", None)
-        model_save_dir = kwargs.pop("model_save_dir", None)
-        rbln_config = kwargs.pop("rbln_config", None)
-        rbln_config = {} if rbln_config is None else rbln_config
+# Portions of this software are licensed under the Apache License,
+# Version 2.0. See the NOTICE file distributed with this work for
+# additional information regarding copyright ownership.
 
-
-
-
-
-
-        with ContextRblnConfig(
-            device=device,
-            device_map=device_map,
-            create_runtimes=create_runtimes,
-            optimze_host_mem=optimize_host_memory,
-        ):
-            model = super().from_pretrained(pretrained_model_name_or_path=model_id, **kwargs)
-
-        if export is None or export is False:
-            return model
-
-        do_classifier_free_guidance = (
-            rbln_config.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
-        )
-
-        vae = RBLNAutoencoderKL.from_pretrained(
-            model_id=model_id,
-            subfolder="vae",
-            export=True,
-            model_save_dir=model_save_dir,
-            rbln_unet_sample_size=model.unet.config.sample_size,
-            rbln_use_encode=False,
-            rbln_config={**rbln_config},
-        )
-        text_encoder = RBLNCLIPTextModel.from_pretrained(
-            model_id=model_id,
-            subfolder="text_encoder",
-            export=True,
-            model_save_dir=model_save_dir,
-            rbln_config={**rbln_config},
-        )
-        text_encoder_2 = RBLNCLIPTextModelWithProjection.from_pretrained(
-            model_id=model_id,
-            subfolder="text_encoder_2",
-            export=True,
-            model_save_dir=model_save_dir,
-            rbln_config={**rbln_config},
-        )
-
-        batch_size = rbln_config.pop("batch_size", 1)
-        unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
-
-        unet = RBLNUNet2DConditionModel.from_pretrained(
-            model_id=model_id,
-            subfolder="unet",
-            export=True,
-            model_save_dir=model_save_dir,
-            rbln_max_seq_len=model.text_encoder.config.max_position_embeddings,
-            rbln_text_model_hidden_size=model.text_encoder_2.config.hidden_size,
-            rbln_batch_size=unet_batch_size,
-            rbln_use_encode=False,
-            rbln_is_controlnet=True if "controlnet" in model.config.keys() else False,
-            rbln_config={**rbln_config},
-        )
-
-        if model_save_dir is not None:
-            # To skip saving original pytorch modules
-            del (model.vae, model.text_encoder, model.unet, model.text_encoder_2)
-
-            # Direct calling of `save_pretrained` causes config.unet = (None, None).
-            # So config must be saved again, later.
-            model.save_pretrained(model_save_dir)
+# All other portions of this software, including proprietary code,
+# are the intellectual property of Rebellions Inc. and may not be
+# copied, modified, or distributed without prior written permission
+# from Rebellions Inc.
+"""RBLNStableDiffusionXLPipeline class for inference of diffusion models on rbln devices."""
 
-
-        model.text_encoder = text_encoder
-        model.unet = unet
-        model.text_encoder_2 = text_encoder_2
-        update_dict = {
-            "vae": ("optimum.rbln", "RBLNAutoencoderKL"),
-            "text_encoder": ("optimum.rbln", "RBLNCLIPTextModel"),
-            "unet": ("optimum.rbln", "RBLNUNet2DConditionModel"),
-            "text_encoder_2": ("optimum.rbln", "RBLNCLIPTextModelWithProjection"),
-        }
-        model.register_to_config(**update_dict)
+from diffusers import StableDiffusionXLPipeline
 
-
-        # overwrite to replace incorrect config
-        model.save_config(model_save_dir)
+from ....modeling_diffusers import RBLNDiffusionMixin
 
-        if optimize_host_memory is False:
-            model.compiled_models = [
-                vae.compiled_models[0],
-                unet.compiled_models[0],
-                text_encoder.compiled_models[0],
-                text_encoder_2.compiled_models[0],
-            ]
 
-
+class RBLNStableDiffusionXLPipeline(RBLNDiffusionMixin, StableDiffusionXLPipeline):
+    original_class = StableDiffusionXLPipeline
+    _submodules = ["text_encoder", "text_encoder_2", "unet", "vae"]
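The net effect of the hunk above: the 0.1.12 pipeline overrode `from_pretrained` and compiled the VAE, both text encoders, and the UNet by hand, while in 0.1.15 the class shrinks to a declaration and compilation is driven by the new `RBLNDiffusionMixin` (`optimum/rbln/modeling_diffusers.py` in the file list) over the components named in `_submodules`. A minimal usage sketch follows, assuming the mixin keeps the `export` and `model_save_dir` keywords that appear in the removed 0.1.12 code; the exact 0.1.15 keyword set is not visible in this diff.

```python
# Hypothetical usage sketch -- keyword names are carried over from the removed
# 0.1.12 code path and are not confirmed by this diff.
from optimum.rbln import RBLNStableDiffusionXLPipeline

# export=True is assumed to trigger compilation of the components listed in
# `_submodules` (text_encoder, text_encoder_2, unet, vae) for RBLN NPUs.
pipe = RBLNStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    export=True,
    model_save_dir="sdxl_rbln",
)

image = pipe(prompt="a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")
```

Since the class still inherits from `StableDiffusionXLPipeline`, generation presumably goes through the stock diffusers `__call__`, which is why the subclass no longer needs per-submodule wiring of its own.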
optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py
CHANGED
@@ -1,145 +1,32 @@
-#
-
-#
+# Copyright 2024 Rebellions Inc.
+
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
+# You may obtain a copy of the License at:
+
 #     http://www.apache.org/licenses/LICENSE-2.0
-
+
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""RBLNStableDiffusionXLPipeline class for inference of diffusion models on rbln devices."""
-
-from diffusers import StableDiffusionXLImg2ImgPipeline
-
-from ....modeling_config import use_rbln_config
-from ....transformers import RBLNCLIPTextModel, RBLNCLIPTextModelWithProjection
-from ....utils.runtime_utils import ContextRblnConfig
-from ...models import RBLNAutoencoderKL, RBLNUNet2DConditionModel
-
-
-class RBLNStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
-    @classmethod
-    @use_rbln_config
-    def from_pretrained(cls, model_id, **kwargs):
-        """
-        Pipeline for image-to-image generation using Stable Diffusion XL.
-
-        This model inherits from [`StableDiffusionXLPipeline`]. Check the superclass documentation for the generic methods the
-        library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-        It implements the methods to convert a pre-trained StableDiffusionXL pipeline into a RBLNStableDiffusionXL pipeline by:
-        - transferring the checkpoint weights of the original into an optimized RBLN graph,
-        - compiling the resulting graph using the RBLN compiler.
 
-
-
-
-        - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
-        - A path to a *directory* containing a model saved using [`~OptimizedModel.save_pretrained`],
-        """
-        export = kwargs.pop("export", None)
-        model_save_dir = kwargs.pop("model_save_dir", None)
-        rbln_config = kwargs.pop("rbln_config", None)
-        rbln_config = {} if rbln_config is None else rbln_config
+# Portions of this software are licensed under the Apache License,
+# Version 2.0. See the NOTICE file distributed with this work for
+# additional information regarding copyright ownership.
 
-
-
-
-
-
-        with ContextRblnConfig(
-            device=device,
-            device_map=device_map,
-            create_runtimes=create_runtimes,
-            optimze_host_mem=optimize_host_memory,
-        ):
-            model = super().from_pretrained(pretrained_model_name_or_path=model_id, **kwargs)
-
-        if export is None or export is False:
-            return model
-
-        do_classifier_free_guidance = (
-            rbln_config.pop("guidance_scale", 5.0) > 1.0 and model.unet.config.time_cond_proj_dim is None
-        )
-
-        vae = RBLNAutoencoderKL.from_pretrained(
-            model_id=model_id,
-            subfolder="vae",
-            export=True,
-            model_save_dir=model_save_dir,
-            rbln_unet_sample_size=model.unet.config.sample_size,
-            rbln_use_encode=True,
-            rbln_vae_scale_factor=model.vae_scale_factor,
-            rbln_config={**rbln_config},
-        )
-        text_encoder = RBLNCLIPTextModel.from_pretrained(
-            model_id=model_id,
-            subfolder="text_encoder",
-            export=True,
-            model_save_dir=model_save_dir,
-            rbln_config={**rbln_config},
-        )
-        text_encoder_2 = RBLNCLIPTextModelWithProjection.from_pretrained(
-            model_id=model_id,
-            subfolder="text_encoder_2",
-            export=True,
-            model_save_dir=model_save_dir,
-            rbln_config={**rbln_config},
-        )
-
-        batch_size = rbln_config.pop("batch_size", 1)
-        unet_batch_size = batch_size * 2 if do_classifier_free_guidance else batch_size
-
-        unet = RBLNUNet2DConditionModel.from_pretrained(
-            model_id=model_id,
-            subfolder="unet",
-            export=True,
-            model_save_dir=model_save_dir,
-            rbln_max_seq_len=model.text_encoder.config.max_position_embeddings,
-            rbln_text_model_hidden_size=model.text_encoder_2.config.hidden_size,
-            rbln_batch_size=unet_batch_size,
-            rbln_use_encode=True,
-            rbln_vae_scale_factor=model.vae_scale_factor,
-            rbln_is_controlnet=True if "controlnet" in model.config.keys() else False,
-            rbln_config={**rbln_config},
-        )
-
-        if model_save_dir is not None:
-            # To skip saving original pytorch modules
-            del (model.vae, model.text_encoder, model.unet, model.text_encoder_2)
-
-            # Direct calling of `save_pretrained` causes config.unet = (None, None).
-            # So config must be saved again, later.
-            model.save_pretrained(model_save_dir)
+# All other portions of this software, including proprietary code,
+# are the intellectual property of Rebellions Inc. and may not be
+# copied, modified, or distributed without prior written permission
+# from Rebellions Inc.
+"""RBLNStableDiffusionXLPipeline class for inference of diffusion models on rbln devices."""
 
-
-        model.text_encoder = text_encoder
-        model.unet = unet
-        model.text_encoder_2 = text_encoder_2
-        update_dict = {
-            "vae": ("optimum.rbln", "RBLNAutoencoderKL"),
-            "text_encoder": ("optimum.rbln", "RBLNCLIPTextModel"),
-            "unet": ("optimum.rbln", "RBLNUNet2DConditionModel"),
-            "text_encoder_2": ("optimum.rbln", "RBLNCLIPTextModelWithProjection"),
-        }
-        model.register_to_config(**update_dict)
+from diffusers import StableDiffusionXLImg2ImgPipeline
 
-
-        # overwrite to replace incorrect config
-        model.save_config(model_save_dir)
+from ....modeling_diffusers import RBLNDiffusionMixin
 
-        if optimize_host_memory is False:
-            model.compiled_models = [
-                vae.compiled_models[0],
-                vae.compiled_models[1],
-                unet.compiled_models[0],
-                text_encoder.compiled_models[0],
-                text_encoder_2.compiled_models[0],
-            ]
 
-
+class RBLNStableDiffusionXLImg2ImgPipeline(RBLNDiffusionMixin, StableDiffusionXLImg2ImgPipeline):
+    original_class = StableDiffusionXLImg2ImgPipeline
+    _submodules = ["text_encoder", "text_encoder_2", "unet", "vae"]
optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py
ADDED
@@ -0,0 +1,32 @@
+# Copyright 2024 Rebellions Inc.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Portions of this software are licensed under the Apache License,
+# Version 2.0. See the NOTICE file distributed with this work for
+# additional information regarding copyright ownership.
+
+# All other portions of this software, including proprietary code,
+# are the intellectual property of Rebellions Inc. and may not be
+# copied, modified, or distributed without prior written permission
+# from Rebellions Inc.
+"""RBLNStableDiffusionXLInpaintPipeline class for inference of diffusion models on rbln devices."""
+
+from diffusers import StableDiffusionXLInpaintPipeline
+
+from ....modeling_diffusers import RBLNDiffusionMixin
+
+
+class RBLNStableDiffusionXLInpaintPipeline(RBLNDiffusionMixin, StableDiffusionXLInpaintPipeline):
+    original_class = StableDiffusionXLInpaintPipeline
+    _submodules = ["text_encoder", "text_encoder_2", "unet", "vae"]
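All three stable_diffusion_xl pipelines in this diff collapse to the same 0.1.15 shape: inherit from `RBLNDiffusionMixin` plus the original diffusers pipeline, point `original_class` at that pipeline, and declare the compilable components in `_submodules`. The sketch below shows how the pattern presumably extends to the other new pipeline files listed above; the body of `pipeline_stable_diffusion_inpaint.py` is not shown in this section, so the class is an assumed reconstruction rather than the packaged source.

```python
# Assumed reconstruction of the non-XL inpaint wrapper, mirroring the three
# XL pipelines shown above -- not copied from the 0.1.15 package.
from diffusers import StableDiffusionInpaintPipeline

from ....modeling_diffusers import RBLNDiffusionMixin


class RBLNStableDiffusionInpaintPipeline(RBLNDiffusionMixin, StableDiffusionInpaintPipeline):
    original_class = StableDiffusionInpaintPipeline
    # Plain Stable Diffusion has a single text encoder, so no "text_encoder_2"
    # entry here (assumed).
    _submodules = ["text_encoder", "unet", "vae"]
```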