optimum-rbln 0.9.3.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of optimum-rbln might be problematic. Click here for more details.
- optimum/rbln/__init__.py +505 -0
- optimum/rbln/__version__.py +34 -0
- optimum/rbln/cli.py +660 -0
- optimum/rbln/configuration_utils.py +968 -0
- optimum/rbln/diffusers/__init__.py +198 -0
- optimum/rbln/diffusers/configurations/__init__.py +37 -0
- optimum/rbln/diffusers/configurations/models/__init__.py +10 -0
- optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl.py +73 -0
- optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl_cosmos.py +84 -0
- optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl_temporal_decoder.py +67 -0
- optimum/rbln/diffusers/configurations/models/configuration_controlnet.py +64 -0
- optimum/rbln/diffusers/configurations/models/configuration_prior_transformer.py +59 -0
- optimum/rbln/diffusers/configurations/models/configuration_transformer_cosmos.py +78 -0
- optimum/rbln/diffusers/configurations/models/configuration_transformer_sd3.py +63 -0
- optimum/rbln/diffusers/configurations/models/configuration_unet_2d_condition.py +81 -0
- optimum/rbln/diffusers/configurations/models/configuration_unet_spatio_temporal_condition.py +59 -0
- optimum/rbln/diffusers/configurations/models/configuration_vq_model.py +74 -0
- optimum/rbln/diffusers/configurations/pipelines/__init__.py +34 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_controlnet.py +316 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_cosmos.py +117 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_kandinsky2_2.py +363 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion.py +156 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_3.py +176 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_xl.py +159 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_video_diffusion.py +114 -0
- optimum/rbln/diffusers/modeling_diffusers.py +451 -0
- optimum/rbln/diffusers/models/__init__.py +64 -0
- optimum/rbln/diffusers/models/autoencoders/__init__.py +18 -0
- optimum/rbln/diffusers/models/autoencoders/autoencoder_kl.py +255 -0
- optimum/rbln/diffusers/models/autoencoders/autoencoder_kl_cosmos.py +245 -0
- optimum/rbln/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +275 -0
- optimum/rbln/diffusers/models/autoencoders/vae.py +178 -0
- optimum/rbln/diffusers/models/autoencoders/vq_model.py +211 -0
- optimum/rbln/diffusers/models/controlnet.py +281 -0
- optimum/rbln/diffusers/models/transformers/__init__.py +17 -0
- optimum/rbln/diffusers/models/transformers/prior_transformer.py +160 -0
- optimum/rbln/diffusers/models/transformers/transformer_cosmos.py +344 -0
- optimum/rbln/diffusers/models/transformers/transformer_sd3.py +191 -0
- optimum/rbln/diffusers/models/unets/__init__.py +16 -0
- optimum/rbln/diffusers/models/unets/unet_2d_condition.py +408 -0
- optimum/rbln/diffusers/models/unets/unet_spatio_temporal_condition.py +201 -0
- optimum/rbln/diffusers/pipelines/__init__.py +113 -0
- optimum/rbln/diffusers/pipelines/auto_pipeline.py +307 -0
- optimum/rbln/diffusers/pipelines/controlnet/__init__.py +19 -0
- optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +139 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +669 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +640 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +825 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +837 -0
- optimum/rbln/diffusers/pipelines/cosmos/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/cosmos/configuration_cosmos_guardrail.py +113 -0
- optimum/rbln/diffusers/pipelines/cosmos/cosmos_guardrail.py +425 -0
- optimum/rbln/diffusers/pipelines/cosmos/pipeline_cosmos_text2world.py +128 -0
- optimum/rbln/diffusers/pipelines/cosmos/pipeline_cosmos_video2world.py +128 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/__init__.py +23 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +34 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +207 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +34 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py +34 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +32 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_video_diffusion/__init__.py +15 -0
- optimum/rbln/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +46 -0
- optimum/rbln/modeling.py +364 -0
- optimum/rbln/modeling_base.py +637 -0
- optimum/rbln/ops/__init__.py +19 -0
- optimum/rbln/ops/attn.py +455 -0
- optimum/rbln/ops/flash_attn.py +350 -0
- optimum/rbln/ops/kv_cache_update.py +29 -0
- optimum/rbln/ops/linear.py +32 -0
- optimum/rbln/ops/sliding_window_attn.py +111 -0
- optimum/rbln/transformers/__init__.py +340 -0
- optimum/rbln/transformers/configuration_generic.py +120 -0
- optimum/rbln/transformers/modeling_attention_utils.py +385 -0
- optimum/rbln/transformers/modeling_generic.py +280 -0
- optimum/rbln/transformers/modeling_outputs.py +37 -0
- optimum/rbln/transformers/modeling_rope_utils.py +314 -0
- optimum/rbln/transformers/models/__init__.py +343 -0
- optimum/rbln/transformers/models/audio_spectrogram_transformer/__init__.py +17 -0
- optimum/rbln/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +47 -0
- optimum/rbln/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +91 -0
- optimum/rbln/transformers/models/auto/__init__.py +31 -0
- optimum/rbln/transformers/models/auto/auto_factory.py +267 -0
- optimum/rbln/transformers/models/auto/modeling_auto.py +162 -0
- optimum/rbln/transformers/models/bart/__init__.py +17 -0
- optimum/rbln/transformers/models/bart/bart_architecture.py +163 -0
- optimum/rbln/transformers/models/bart/configuration_bart.py +36 -0
- optimum/rbln/transformers/models/bart/modeling_bart.py +86 -0
- optimum/rbln/transformers/models/bert/__init__.py +16 -0
- optimum/rbln/transformers/models/bert/bert_architecture.py +16 -0
- optimum/rbln/transformers/models/bert/configuration_bert.py +46 -0
- optimum/rbln/transformers/models/bert/modeling_bert.py +148 -0
- optimum/rbln/transformers/models/blip_2/__init__.py +20 -0
- optimum/rbln/transformers/models/blip_2/configuration_blip_2.py +115 -0
- optimum/rbln/transformers/models/blip_2/modeling_blip_2.py +526 -0
- optimum/rbln/transformers/models/clip/__init__.py +26 -0
- optimum/rbln/transformers/models/clip/configuration_clip.py +103 -0
- optimum/rbln/transformers/models/clip/modeling_clip.py +384 -0
- optimum/rbln/transformers/models/colpali/__init__.py +2 -0
- optimum/rbln/transformers/models/colpali/colpali_architecture.py +218 -0
- optimum/rbln/transformers/models/colpali/configuration_colpali.py +84 -0
- optimum/rbln/transformers/models/colpali/modeling_colpali.py +361 -0
- optimum/rbln/transformers/models/colqwen2/__init__.py +2 -0
- optimum/rbln/transformers/models/colqwen2/colqwen2_architecture.py +233 -0
- optimum/rbln/transformers/models/colqwen2/configuration_colqwen2.py +74 -0
- optimum/rbln/transformers/models/colqwen2/modeling_colqwen2.py +446 -0
- optimum/rbln/transformers/models/decoderonly/__init__.py +27 -0
- optimum/rbln/transformers/models/decoderonly/configuration_decoderonly.py +300 -0
- optimum/rbln/transformers/models/decoderonly/configuration_lora.py +411 -0
- optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +1224 -0
- optimum/rbln/transformers/models/decoderonly/decoderonly_runtime_utils.py +508 -0
- optimum/rbln/transformers/models/decoderonly/generation_decoderonly.py +119 -0
- optimum/rbln/transformers/models/decoderonly/lora_architecture.py +204 -0
- optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +823 -0
- optimum/rbln/transformers/models/depth_anything/__init__.py +16 -0
- optimum/rbln/transformers/models/depth_anything/configuration_depth_anything.py +24 -0
- optimum/rbln/transformers/models/depth_anything/modeling_depth_anything.py +42 -0
- optimum/rbln/transformers/models/distilbert/__init__.py +19 -0
- optimum/rbln/transformers/models/distilbert/configuration_distilbert.py +24 -0
- optimum/rbln/transformers/models/distilbert/modeling_distilbert.py +51 -0
- optimum/rbln/transformers/models/dpt/__init__.py +16 -0
- optimum/rbln/transformers/models/dpt/configuration_dpt.py +24 -0
- optimum/rbln/transformers/models/dpt/modeling_dpt.py +42 -0
- optimum/rbln/transformers/models/exaone/__init__.py +24 -0
- optimum/rbln/transformers/models/exaone/configuration_exaone.py +42 -0
- optimum/rbln/transformers/models/exaone/exaone_architecture.py +77 -0
- optimum/rbln/transformers/models/exaone/modeling_exaone.py +145 -0
- optimum/rbln/transformers/models/gemma/__init__.py +16 -0
- optimum/rbln/transformers/models/gemma/configuration_gemma.py +50 -0
- optimum/rbln/transformers/models/gemma/gemma_architecture.py +27 -0
- optimum/rbln/transformers/models/gemma/modeling_gemma.py +104 -0
- optimum/rbln/transformers/models/gemma3/__init__.py +16 -0
- optimum/rbln/transformers/models/gemma3/configuration_gemma3.py +109 -0
- optimum/rbln/transformers/models/gemma3/gemma3_architecture.py +170 -0
- optimum/rbln/transformers/models/gemma3/gemma3_runtime_utils.py +245 -0
- optimum/rbln/transformers/models/gemma3/modeling_gemma3.py +611 -0
- optimum/rbln/transformers/models/gpt2/__init__.py +16 -0
- optimum/rbln/transformers/models/gpt2/configuration_gpt2.py +50 -0
- optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +93 -0
- optimum/rbln/transformers/models/gpt2/modeling_gpt2.py +55 -0
- optimum/rbln/transformers/models/grounding_dino/__init__.py +10 -0
- optimum/rbln/transformers/models/grounding_dino/configuration_grounding_dino.py +92 -0
- optimum/rbln/transformers/models/grounding_dino/grounding_dino_architecture.py +599 -0
- optimum/rbln/transformers/models/grounding_dino/modeling_grounding_dino.py +1048 -0
- optimum/rbln/transformers/models/idefics3/__init__.py +16 -0
- optimum/rbln/transformers/models/idefics3/configuration_idefics3.py +89 -0
- optimum/rbln/transformers/models/idefics3/modeling_idefics3.py +497 -0
- optimum/rbln/transformers/models/llama/__init__.py +16 -0
- optimum/rbln/transformers/models/llama/configuration_llama.py +50 -0
- optimum/rbln/transformers/models/llama/llama_architecture.py +19 -0
- optimum/rbln/transformers/models/llama/modeling_llama.py +104 -0
- optimum/rbln/transformers/models/llava/__init__.py +16 -0
- optimum/rbln/transformers/models/llava/configuration_llava.py +72 -0
- optimum/rbln/transformers/models/llava/modeling_llava.py +490 -0
- optimum/rbln/transformers/models/llava_next/__init__.py +16 -0
- optimum/rbln/transformers/models/llava_next/configuration_llava_next.py +69 -0
- optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +493 -0
- optimum/rbln/transformers/models/midm/__init__.py +24 -0
- optimum/rbln/transformers/models/midm/configuration_midm.py +42 -0
- optimum/rbln/transformers/models/midm/midm_architecture.py +144 -0
- optimum/rbln/transformers/models/midm/modeling_midm.py +144 -0
- optimum/rbln/transformers/models/mistral/__init__.py +16 -0
- optimum/rbln/transformers/models/mistral/configuration_mistral.py +50 -0
- optimum/rbln/transformers/models/mistral/mistral_architecture.py +19 -0
- optimum/rbln/transformers/models/mistral/modeling_mistral.py +115 -0
- optimum/rbln/transformers/models/opt/__init__.py +16 -0
- optimum/rbln/transformers/models/opt/configuration_opt.py +29 -0
- optimum/rbln/transformers/models/opt/modeling_opt.py +102 -0
- optimum/rbln/transformers/models/opt/opt_architecture.py +74 -0
- optimum/rbln/transformers/models/pegasus/__init__.py +17 -0
- optimum/rbln/transformers/models/pegasus/configuration_pegasus.py +38 -0
- optimum/rbln/transformers/models/pegasus/modeling_pegasus.py +71 -0
- optimum/rbln/transformers/models/pegasus/pegasus_architecture.py +161 -0
- optimum/rbln/transformers/models/phi/__init__.py +16 -0
- optimum/rbln/transformers/models/phi/configuration_phi.py +50 -0
- optimum/rbln/transformers/models/phi/modeling_phi.py +92 -0
- optimum/rbln/transformers/models/phi/phi_architecture.py +115 -0
- optimum/rbln/transformers/models/pixtral/__init__.py +16 -0
- optimum/rbln/transformers/models/pixtral/configuration_pixtral.py +43 -0
- optimum/rbln/transformers/models/pixtral/modeling_pixtral.py +322 -0
- optimum/rbln/transformers/models/pixtral/pixtral_architecture.py +73 -0
- optimum/rbln/transformers/models/qwen2/__init__.py +16 -0
- optimum/rbln/transformers/models/qwen2/configuration_qwen2.py +50 -0
- optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +123 -0
- optimum/rbln/transformers/models/qwen2/qwen2_architecture.py +19 -0
- optimum/rbln/transformers/models/qwen2_5_vl/__init__.py +19 -0
- optimum/rbln/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +111 -0
- optimum/rbln/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +636 -0
- optimum/rbln/transformers/models/qwen2_5_vl/qwen2_5_vl_architecture.py +220 -0
- optimum/rbln/transformers/models/qwen2_vl/__init__.py +19 -0
- optimum/rbln/transformers/models/qwen2_vl/configuration_qwen2_vl.py +88 -0
- optimum/rbln/transformers/models/qwen2_vl/modeling_qwen2_vl.py +513 -0
- optimum/rbln/transformers/models/qwen2_vl/qwen2_vl_architecture.py +165 -0
- optimum/rbln/transformers/models/qwen3/__init__.py +16 -0
- optimum/rbln/transformers/models/qwen3/configuration_qwen3.py +71 -0
- optimum/rbln/transformers/models/qwen3/modeling_qwen3.py +133 -0
- optimum/rbln/transformers/models/qwen3/qwen3_architecture.py +31 -0
- optimum/rbln/transformers/models/resnet/__init__.py +23 -0
- optimum/rbln/transformers/models/resnet/configuration_resnet.py +42 -0
- optimum/rbln/transformers/models/resnet/modeling_resnet.py +99 -0
- optimum/rbln/transformers/models/roberta/__init__.py +24 -0
- optimum/rbln/transformers/models/roberta/configuration_roberta.py +33 -0
- optimum/rbln/transformers/models/roberta/modeling_roberta.py +72 -0
- optimum/rbln/transformers/models/seq2seq/__init__.py +16 -0
- optimum/rbln/transformers/models/seq2seq/configuration_seq2seq.py +71 -0
- optimum/rbln/transformers/models/seq2seq/modeling_seq2seq.py +477 -0
- optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py +527 -0
- optimum/rbln/transformers/models/siglip/__init__.py +16 -0
- optimum/rbln/transformers/models/siglip/configuration_siglip.py +76 -0
- optimum/rbln/transformers/models/siglip/modeling_siglip.py +199 -0
- optimum/rbln/transformers/models/swin/__init__.py +16 -0
- optimum/rbln/transformers/models/swin/configuration_swin.py +42 -0
- optimum/rbln/transformers/models/swin/modeling_swin.py +354 -0
- optimum/rbln/transformers/models/t5/__init__.py +17 -0
- optimum/rbln/transformers/models/t5/configuration_t5.py +36 -0
- optimum/rbln/transformers/models/t5/modeling_t5.py +130 -0
- optimum/rbln/transformers/models/t5/t5_architecture.py +264 -0
- optimum/rbln/transformers/models/time_series_transformer/__init__.py +26 -0
- optimum/rbln/transformers/models/time_series_transformer/configuration_time_series_transformer.py +41 -0
- optimum/rbln/transformers/models/time_series_transformer/modeling_time_series_transformer.py +435 -0
- optimum/rbln/transformers/models/time_series_transformer/time_series_transformers_architecture.py +337 -0
- optimum/rbln/transformers/models/vit/__init__.py +19 -0
- optimum/rbln/transformers/models/vit/configuration_vit.py +24 -0
- optimum/rbln/transformers/models/vit/modeling_vit.py +44 -0
- optimum/rbln/transformers/models/wav2vec2/__init__.py +16 -0
- optimum/rbln/transformers/models/wav2vec2/configuration_wav2vec2.py +38 -0
- optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +104 -0
- optimum/rbln/transformers/models/whisper/__init__.py +17 -0
- optimum/rbln/transformers/models/whisper/configuration_whisper.py +72 -0
- optimum/rbln/transformers/models/whisper/generation_whisper.py +159 -0
- optimum/rbln/transformers/models/whisper/modeling_whisper.py +475 -0
- optimum/rbln/transformers/models/whisper/whisper_architecture.py +349 -0
- optimum/rbln/transformers/models/xlm_roberta/__init__.py +24 -0
- optimum/rbln/transformers/models/xlm_roberta/configuration_xlm_roberta.py +32 -0
- optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +82 -0
- optimum/rbln/transformers/utils/__init__.py +0 -0
- optimum/rbln/transformers/utils/rbln_quantization.py +589 -0
- optimum/rbln/transformers/utils/rbln_runtime_wrapper.py +79 -0
- optimum/rbln/utils/__init__.py +16 -0
- optimum/rbln/utils/decorator_utils.py +86 -0
- optimum/rbln/utils/deprecation.py +213 -0
- optimum/rbln/utils/hub.py +94 -0
- optimum/rbln/utils/import_utils.py +170 -0
- optimum/rbln/utils/logging.py +110 -0
- optimum/rbln/utils/model_utils.py +63 -0
- optimum/rbln/utils/runtime_utils.py +249 -0
- optimum/rbln/utils/save_utils.py +102 -0
- optimum/rbln/utils/submodule.py +152 -0
- optimum_rbln-0.9.3.post1.dist-info/METADATA +124 -0
- optimum_rbln-0.9.3.post1.dist-info/RECORD +264 -0
- optimum_rbln-0.9.3.post1.dist-info/WHEEL +4 -0
- optimum_rbln-0.9.3.post1.dist-info/entry_points.txt +2 -0
- optimum_rbln-0.9.3.post1.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
# Copyright 2025 Rebellions Inc. All rights reserved.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from typing import Any, Optional, Tuple
|
|
16
|
+
|
|
17
|
+
from ....configuration_utils import RBLNModelConfig
|
|
18
|
+
from ....transformers import RBLNCLIPTextModelWithProjectionConfig, RBLNT5EncoderModelConfig
|
|
19
|
+
from ..models import RBLNAutoencoderKLConfig, RBLNSD3Transformer2DModelConfig
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class RBLNStableDiffusion3PipelineBaseConfig(RBLNModelConfig):
    """Shared configuration for RBLN Stable Diffusion 3 pipelines.

    Bundles the per-submodule RBLN configs (transformer, three text encoders,
    and VAE) and resolves pipeline-level options: batch size, image
    dimensions, text sequence length, and classifier-free-guidance batching
    for the transformer.
    """

    # Submodule attribute names handled by the parent RBLNModelConfig.
    submodules = ["transformer", "text_encoder", "text_encoder_2", "text_encoder_3", "vae"]
    # Whether the VAE encoder is compiled; img2img/inpaint subclasses set True.
    _vae_uses_encoder = False

    def __init__(
        self,
        transformer: Optional[RBLNSD3Transformer2DModelConfig] = None,
        text_encoder: Optional[RBLNCLIPTextModelWithProjectionConfig] = None,
        text_encoder_2: Optional[RBLNCLIPTextModelWithProjectionConfig] = None,
        text_encoder_3: Optional[RBLNT5EncoderModelConfig] = None,
        vae: Optional[RBLNAutoencoderKLConfig] = None,
        *,
        max_seq_len: Optional[int] = None,
        sample_size: Optional[Tuple[int, int]] = None,
        image_size: Optional[Tuple[int, int]] = None,
        batch_size: Optional[int] = None,
        img_height: Optional[int] = None,
        img_width: Optional[int] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        guidance_scale: Optional[float] = None,
        **kwargs: Any,
    ):
        """
        Args:
            transformer (Optional[RBLNSD3Transformer2DModelConfig]): Configuration for the transformer model component.
                Initialized as RBLNSD3Transformer2DModelConfig if not provided.
            text_encoder (Optional[RBLNCLIPTextModelWithProjectionConfig]): Configuration for the primary text encoder.
                Initialized as RBLNCLIPTextModelWithProjectionConfig if not provided.
            text_encoder_2 (Optional[RBLNCLIPTextModelWithProjectionConfig]): Configuration for the secondary text encoder.
                Initialized as RBLNCLIPTextModelWithProjectionConfig if not provided.
            text_encoder_3 (Optional[RBLNT5EncoderModelConfig]): Configuration for the tertiary (T5) text encoder.
                Initialized as RBLNT5EncoderModelConfig if not provided.
            vae (Optional[RBLNAutoencoderKLConfig]): Configuration for the VAE model component.
                Initialized as RBLNAutoencoderKLConfig if not provided.
            max_seq_len (Optional[int]): Maximum sequence length for text inputs. Defaults to 256.
            sample_size (Optional[Tuple[int, int]]): Spatial dimensions for the transformer model.
            image_size (Optional[Tuple[int, int]]): Dimensions for the generated images.
                Mutually exclusive with img_height/img_width and height/width.
            batch_size (Optional[int]): Batch size for inference, applied to all submodules.
            img_height (Optional[int]): Height of the generated images (legacy spelling).
            img_width (Optional[int]): Width of the generated images (legacy spelling).
            height (Optional[int]): Height of the generated images (HF-aligned spelling).
            width (Optional[int]): Width of the generated images (HF-aligned spelling).
            guidance_scale (Optional[float]): Scale for classifier-free guidance.
            kwargs: Additional arguments passed to the parent RBLNModelConfig.

        Raises:
            ValueError: If image_size is combined with any per-dimension argument,
                if both height/width and img_height/img_width are given, or if only
                one member of a dimension pair is provided.

        Note:
            When guidance_scale > 1.0, the transformer batch size is automatically
            doubled to accommodate classifier-free guidance.
        """
        super().__init__(**kwargs)

        # image_size is mutually exclusive with every per-dimension argument.
        if image_size is not None and any(
            dim is not None for dim in (img_height, img_width, height, width)
        ):
            raise ValueError("image_size cannot be provided alongside img_height/img_width or height/width")

        # Resolve image dimensions, preferring the HF-aligned height/width pair
        # over the legacy img_height/img_width pair.
        if height is not None and width is not None:
            if img_height is not None or img_width is not None:
                # Both spellings supplied at once is ambiguous — reject.
                raise ValueError(
                    "Cannot provide both 'height'/'width' and 'img_height'/'img_width' simultaneously. "
                    "Please use one set of arguments for image dimensions, preferring 'height'/'width'."
                )
            image_size = (height, width)
        elif (height is None) != (width is None):
            raise ValueError("Both height and width must be provided together if used")
        elif img_height is not None and img_width is not None:
            # Backward-compatible fallback.
            image_size = (img_height, img_width)
        elif (img_height is None) != (img_width is None):
            raise ValueError("Both img_height and img_width must be provided together if used")

        max_seq_len = max_seq_len or 256

        self.text_encoder = self.initialize_submodule_config(
            text_encoder,
            cls_name="RBLNCLIPTextModelWithProjectionConfig",
            batch_size=batch_size,
        )
        self.text_encoder_2 = self.initialize_submodule_config(
            text_encoder_2,
            cls_name="RBLNCLIPTextModelWithProjectionConfig",
            batch_size=batch_size,
        )
        self.text_encoder_3 = self.initialize_submodule_config(
            text_encoder_3,
            cls_name="RBLNT5EncoderModelConfig",
            batch_size=batch_size,
            max_seq_len=max_seq_len,
            model_input_names=["input_ids"],
        )
        self.transformer = self.initialize_submodule_config(
            transformer,
            cls_name="RBLNSD3Transformer2DModelConfig",
            sample_size=sample_size,
        )
        self.vae = self.initialize_submodule_config(
            vae,
            cls_name="RBLNAutoencoderKLConfig",
            batch_size=batch_size,
            uses_encoder=self.__class__._vae_uses_encoder,
            sample_size=image_size,
        )

        # Fall back to the original pipeline's default guidance scale so the
        # transformer batch size matches what __call__ will actually run with.
        if guidance_scale is None:
            guidance_scale = self.get_default_values_for_original_cls("__call__", ["guidance_scale"])["guidance_scale"]

        # Classifier-free guidance runs cond+uncond in one batch, doubling it.
        if not self.transformer.batch_size_is_specified:
            cfg_multiplier = 2 if guidance_scale > 1.0 else 1
            self.transformer.batch_size = self.text_encoder.batch_size * cfg_multiplier

    @property
    def max_seq_len(self):
        """Maximum text sequence length, as stored on the T5 encoder config."""
        return self.text_encoder_3.max_seq_len

    @property
    def batch_size(self):
        """Pipeline batch size, as stored on the VAE config."""
        return self.vae.batch_size

    @property
    def sample_size(self):
        """Transformer spatial sample size."""
        return self.transformer.sample_size

    @property
    def image_size(self):
        """Generated image dimensions (the VAE sample size)."""
        return self.vae.sample_size
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
class RBLNStableDiffusion3PipelineConfig(RBLNStableDiffusion3PipelineBaseConfig):
    """Config for SD3 Text2Img Pipeline."""

    # Text-to-image never encodes an input image, so the VAE encoder is not compiled.
    _vae_uses_encoder = False
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
class RBLNStableDiffusion3Img2ImgPipelineConfig(RBLNStableDiffusion3PipelineBaseConfig):
    """Config for SD3 Img2Img Pipeline."""

    # Image-to-image encodes the input image, so the VAE encoder is compiled too.
    _vae_uses_encoder = True
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
class RBLNStableDiffusion3InpaintPipelineConfig(RBLNStableDiffusion3PipelineBaseConfig):
    """Config for SD3 Inpainting Pipeline."""

    # Inpainting encodes the masked input image, so the VAE encoder is compiled too.
    _vae_uses_encoder = True
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
# Copyright 2025 Rebellions Inc. All rights reserved.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from typing import Any, Optional, Tuple
|
|
16
|
+
|
|
17
|
+
from ....configuration_utils import RBLNModelConfig
|
|
18
|
+
from ....transformers import RBLNCLIPTextModelConfig, RBLNCLIPTextModelWithProjectionConfig
|
|
19
|
+
from ..models import RBLNAutoencoderKLConfig, RBLNUNet2DConditionModelConfig
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class RBLNStableDiffusionXLPipelineBaseConfig(RBLNModelConfig):
    """Shared configuration for RBLN Stable Diffusion XL pipelines.

    Bundles the per-submodule RBLN configs (two text encoders, UNet, VAE) and
    resolves pipeline-level options: batch size, image dimensions, and
    classifier-free-guidance batching for the UNet.
    """

    # Submodule attribute names handled by the parent RBLNModelConfig.
    submodules = ["text_encoder", "text_encoder_2", "unet", "vae"]
    # Whether the VAE encoder is compiled; img2img/inpaint subclasses set True.
    _vae_uses_encoder = False

    def __init__(
        self,
        text_encoder: Optional[RBLNCLIPTextModelConfig] = None,
        text_encoder_2: Optional[RBLNCLIPTextModelWithProjectionConfig] = None,
        unet: Optional[RBLNUNet2DConditionModelConfig] = None,
        vae: Optional[RBLNAutoencoderKLConfig] = None,
        *,
        batch_size: Optional[int] = None,
        img_height: Optional[int] = None,
        img_width: Optional[int] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        sample_size: Optional[Tuple[int, int]] = None,
        image_size: Optional[Tuple[int, int]] = None,
        guidance_scale: Optional[float] = None,
        **kwargs: Any,
    ):
        """
        Args:
            text_encoder (Optional[RBLNCLIPTextModelConfig]): Configuration for the primary text encoder component.
                Initialized as RBLNCLIPTextModelConfig if not provided.
            text_encoder_2 (Optional[RBLNCLIPTextModelWithProjectionConfig]): Configuration for the secondary text encoder component.
                Initialized as RBLNCLIPTextModelWithProjectionConfig if not provided.
            unet (Optional[RBLNUNet2DConditionModelConfig]): Configuration for the UNet model component.
                Initialized as RBLNUNet2DConditionModelConfig if not provided.
            vae (Optional[RBLNAutoencoderKLConfig]): Configuration for the VAE model component.
                Initialized as RBLNAutoencoderKLConfig if not provided.
            batch_size (Optional[int]): Batch size for inference, applied to all submodules.
            img_height (Optional[int]): Height of the generated images (legacy spelling).
            img_width (Optional[int]): Width of the generated images (legacy spelling).
            height (Optional[int]): Height of the generated images (HF-aligned spelling).
            width (Optional[int]): Width of the generated images (HF-aligned spelling).
            sample_size (Optional[Tuple[int, int]]): Spatial dimensions for the UNet model.
            image_size (Optional[Tuple[int, int]]): Alternative way to specify image dimensions.
                Mutually exclusive with img_height/img_width and height/width.
            guidance_scale (Optional[float]): Scale for classifier-free guidance.
            kwargs: Additional arguments passed to the parent RBLNModelConfig.

        Raises:
            ValueError: If image_size is combined with any per-dimension argument,
                if both height/width and img_height/img_width are given, or if only
                one member of a dimension pair is provided.

        Note:
            When guidance_scale > 1.0, the UNet batch size is automatically
            doubled to accommodate classifier-free guidance.
        """
        super().__init__(**kwargs)

        # image_size is mutually exclusive with every per-dimension argument.
        if image_size is not None and any(
            dim is not None for dim in (img_height, img_width, height, width)
        ):
            raise ValueError("image_size cannot be provided alongside img_height/img_width or height/width")

        # Resolve image dimensions, preferring the HF-aligned height/width pair
        # over the legacy img_height/img_width pair.
        if height is not None and width is not None:
            if img_height is not None or img_width is not None:
                # Both spellings supplied at once is ambiguous — reject.
                raise ValueError(
                    "Cannot provide both 'height'/'width' and 'img_height'/'img_width' simultaneously. "
                    "Please use one set of arguments for image dimensions, preferring 'height'/'width'."
                )
            image_size = (height, width)
        elif (height is None) != (width is None):
            raise ValueError("Both height and width must be provided together if used")
        elif img_height is not None and img_width is not None:
            # Backward-compatible fallback.
            image_size = (img_height, img_width)
        elif (img_height is None) != (img_width is None):
            raise ValueError("Both img_height and img_width must be provided together if used")

        self.text_encoder = self.initialize_submodule_config(
            text_encoder,
            cls_name="RBLNCLIPTextModelConfig",
            batch_size=batch_size,
        )
        self.text_encoder_2 = self.initialize_submodule_config(
            text_encoder_2,
            cls_name="RBLNCLIPTextModelWithProjectionConfig",
            batch_size=batch_size,
        )

        self.unet = self.initialize_submodule_config(
            unet,
            cls_name="RBLNUNet2DConditionModelConfig",
            sample_size=sample_size,
        )
        self.vae = self.initialize_submodule_config(
            vae,
            cls_name="RBLNAutoencoderKLConfig",
            batch_size=batch_size,
            uses_encoder=self.__class__._vae_uses_encoder,
            sample_size=image_size,  # image size is equal to sample size in vae
        )

        # Fall back to the original pipeline's default guidance scale so the
        # UNet batch size matches what __call__ will actually run with.
        if guidance_scale is None:
            guidance_scale = self.get_default_values_for_original_cls("__call__", ["guidance_scale"])["guidance_scale"]

        # Classifier-free guidance runs cond+uncond in one batch, doubling it.
        if not self.unet.batch_size_is_specified:
            cfg_multiplier = 2 if guidance_scale > 1.0 else 1
            self.unet.batch_size = self.text_encoder.batch_size * cfg_multiplier

    @property
    def batch_size(self):
        """Pipeline batch size, as stored on the VAE config."""
        return self.vae.batch_size

    @property
    def sample_size(self):
        """UNet spatial sample size."""
        return self.unet.sample_size

    @property
    def image_size(self):
        """Generated image dimensions (the VAE sample size)."""
        return self.vae.sample_size
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
class RBLNStableDiffusionXLPipelineConfig(RBLNStableDiffusionXLPipelineBaseConfig):
    """Configuration for the Stable Diffusion XL text-to-image pipeline."""

    # Text2Img never feeds an input image through the VAE, so the base config
    # is told not to set up the VAE encoder side.
    _vae_uses_encoder = False
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
class RBLNStableDiffusionXLImg2ImgPipelineConfig(RBLNStableDiffusionXLPipelineBaseConfig):
    """Configuration for the Stable Diffusion XL image-to-image pipeline."""

    # Img2Img conditions on an input image, so the base config also sets up
    # the VAE encoder side.
    _vae_uses_encoder = True
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
class RBLNStableDiffusionXLInpaintPipelineConfig(RBLNStableDiffusionXLPipelineBaseConfig):
    """Configuration for the Stable Diffusion XL inpainting pipeline."""

    # Inpainting encodes the masked source image, so the base config also sets
    # up the VAE encoder side.
    _vae_uses_encoder = True
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
# Copyright 2025 Rebellions Inc. All rights reserved.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from typing import Any, Optional
|
|
16
|
+
|
|
17
|
+
from ....configuration_utils import RBLNModelConfig
|
|
18
|
+
from ....transformers import RBLNCLIPVisionModelWithProjectionConfig
|
|
19
|
+
from ..models import RBLNAutoencoderKLTemporalDecoderConfig, RBLNUNetSpatioTemporalConditionModelConfig
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class RBLNStableVideoDiffusionPipelineConfig(RBLNModelConfig):
    """Config for the Stable Video Diffusion (image-to-video) pipeline."""

    submodules = ["image_encoder", "unet", "vae"]
    # Img2Vid conditions on an input image, so the VAE encoder is needed too.
    _vae_uses_encoder = True

    def __init__(
        self,
        image_encoder: Optional[RBLNCLIPVisionModelWithProjectionConfig] = None,
        unet: Optional[RBLNUNetSpatioTemporalConditionModelConfig] = None,
        vae: Optional[RBLNAutoencoderKLTemporalDecoderConfig] = None,
        *,
        batch_size: Optional[int] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_frames: Optional[int] = None,
        decode_chunk_size: Optional[int] = None,
        guidance_scale: Optional[float] = None,
        **kwargs: Any,
    ):
        """
        Args:
            image_encoder (Optional[RBLNCLIPVisionModelWithProjectionConfig]): Configuration for the image encoder component.
                Initialized as RBLNCLIPVisionModelWithProjectionConfig if not provided.
            unet (Optional[RBLNUNetSpatioTemporalConditionModelConfig]): Configuration for the UNet model component.
                Initialized as RBLNUNetSpatioTemporalConditionModelConfig if not provided.
            vae (Optional[RBLNAutoencoderKLTemporalDecoderConfig]): Configuration for the VAE model component.
                Initialized as RBLNAutoencoderKLTemporalDecoderConfig if not provided.
            batch_size (Optional[int]): Batch size for inference, applied to all submodules.
            height (Optional[int]): Height of the generated images.
            width (Optional[int]): Width of the generated images.
            num_frames (Optional[int]): The number of frames in the generated video.
            decode_chunk_size (Optional[int]): The number of frames to decode at once during VAE decoding.
                Useful for managing memory usage during video generation.
            guidance_scale (Optional[float]): Scale for classifier-free guidance.
            kwargs: Additional arguments passed to the parent RBLNModelConfig.

        Raises:
            ValueError: If only one of height/width is provided without the other.

        Note:
            When guidance_scale > 1.0, the UNet batch size is automatically doubled to
            accommodate classifier-free guidance.
        """
        super().__init__(**kwargs)
        if height is not None and width is not None:
            image_size = (height, width)
        elif (height is not None) or (width is not None):
            # Reject a lone height or width instead of silently falling back to
            # the defaults (consistent with the SDXL pipeline config).
            raise ValueError("Both height and width must be provided together if used")
        else:
            # Get default image size from original class to set UNet, VAE image size
            height = self.get_default_values_for_original_cls("__call__", ["height"])["height"]
            width = self.get_default_values_for_original_cls("__call__", ["width"])["width"]
            image_size = (height, width)

        self.image_encoder = self.initialize_submodule_config(
            image_encoder, cls_name="RBLNCLIPVisionModelWithProjectionConfig", batch_size=batch_size
        )
        self.unet = self.initialize_submodule_config(
            unet,
            cls_name="RBLNUNetSpatioTemporalConditionModelConfig",
            num_frames=num_frames,
        )
        self.vae = self.initialize_submodule_config(
            vae,
            cls_name="RBLNAutoencoderKLTemporalDecoderConfig",
            batch_size=batch_size,
            num_frames=num_frames,
            decode_chunk_size=decode_chunk_size,
            uses_encoder=self.__class__._vae_uses_encoder,
            sample_size=image_size,  # image size is equal to sample size in vae
        )

        # Get default guidance scale from original class to set UNet batch size.
        # SVD's __call__ exposes it as `max_guidance_scale`.
        if guidance_scale is None:
            guidance_scale = self.get_default_values_for_original_cls("__call__", ["max_guidance_scale"])[
                "max_guidance_scale"
            ]

        # Unless the caller pinned the UNet batch size explicitly, double it
        # for classifier-free guidance (cond + uncond in one batch).
        if not self.unet.batch_size_is_specified:
            do_classifier_free_guidance = guidance_scale > 1.0
            if do_classifier_free_guidance:
                self.unet.batch_size = self.image_encoder.batch_size * 2
            else:
                self.unet.batch_size = self.image_encoder.batch_size

    @property
    def batch_size(self):
        # Pipeline batch size follows the VAE submodule.
        return self.vae.batch_size

    @property
    def sample_size(self):
        # Latent sample size is determined by the UNet.
        return self.unet.sample_size

    @property
    def image_size(self):
        # In the VAE, sample size equals the output image size.
        return self.vae.sample_size
|