optimum-rbln 0.9.3.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of optimum-rbln might be problematic. Click here for more details.
- optimum/rbln/__init__.py +505 -0
- optimum/rbln/__version__.py +34 -0
- optimum/rbln/cli.py +660 -0
- optimum/rbln/configuration_utils.py +968 -0
- optimum/rbln/diffusers/__init__.py +198 -0
- optimum/rbln/diffusers/configurations/__init__.py +37 -0
- optimum/rbln/diffusers/configurations/models/__init__.py +10 -0
- optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl.py +73 -0
- optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl_cosmos.py +84 -0
- optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl_temporal_decoder.py +67 -0
- optimum/rbln/diffusers/configurations/models/configuration_controlnet.py +64 -0
- optimum/rbln/diffusers/configurations/models/configuration_prior_transformer.py +59 -0
- optimum/rbln/diffusers/configurations/models/configuration_transformer_cosmos.py +78 -0
- optimum/rbln/diffusers/configurations/models/configuration_transformer_sd3.py +63 -0
- optimum/rbln/diffusers/configurations/models/configuration_unet_2d_condition.py +81 -0
- optimum/rbln/diffusers/configurations/models/configuration_unet_spatio_temporal_condition.py +59 -0
- optimum/rbln/diffusers/configurations/models/configuration_vq_model.py +74 -0
- optimum/rbln/diffusers/configurations/pipelines/__init__.py +34 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_controlnet.py +316 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_cosmos.py +117 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_kandinsky2_2.py +363 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion.py +156 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_3.py +176 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_xl.py +159 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_video_diffusion.py +114 -0
- optimum/rbln/diffusers/modeling_diffusers.py +451 -0
- optimum/rbln/diffusers/models/__init__.py +64 -0
- optimum/rbln/diffusers/models/autoencoders/__init__.py +18 -0
- optimum/rbln/diffusers/models/autoencoders/autoencoder_kl.py +255 -0
- optimum/rbln/diffusers/models/autoencoders/autoencoder_kl_cosmos.py +245 -0
- optimum/rbln/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +275 -0
- optimum/rbln/diffusers/models/autoencoders/vae.py +178 -0
- optimum/rbln/diffusers/models/autoencoders/vq_model.py +211 -0
- optimum/rbln/diffusers/models/controlnet.py +281 -0
- optimum/rbln/diffusers/models/transformers/__init__.py +17 -0
- optimum/rbln/diffusers/models/transformers/prior_transformer.py +160 -0
- optimum/rbln/diffusers/models/transformers/transformer_cosmos.py +344 -0
- optimum/rbln/diffusers/models/transformers/transformer_sd3.py +191 -0
- optimum/rbln/diffusers/models/unets/__init__.py +16 -0
- optimum/rbln/diffusers/models/unets/unet_2d_condition.py +408 -0
- optimum/rbln/diffusers/models/unets/unet_spatio_temporal_condition.py +201 -0
- optimum/rbln/diffusers/pipelines/__init__.py +113 -0
- optimum/rbln/diffusers/pipelines/auto_pipeline.py +307 -0
- optimum/rbln/diffusers/pipelines/controlnet/__init__.py +19 -0
- optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +139 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +669 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +640 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +825 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +837 -0
- optimum/rbln/diffusers/pipelines/cosmos/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/cosmos/configuration_cosmos_guardrail.py +113 -0
- optimum/rbln/diffusers/pipelines/cosmos/cosmos_guardrail.py +425 -0
- optimum/rbln/diffusers/pipelines/cosmos/pipeline_cosmos_text2world.py +128 -0
- optimum/rbln/diffusers/pipelines/cosmos/pipeline_cosmos_video2world.py +128 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/__init__.py +23 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +34 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +207 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +34 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py +34 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +32 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_video_diffusion/__init__.py +15 -0
- optimum/rbln/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +46 -0
- optimum/rbln/modeling.py +364 -0
- optimum/rbln/modeling_base.py +637 -0
- optimum/rbln/ops/__init__.py +19 -0
- optimum/rbln/ops/attn.py +455 -0
- optimum/rbln/ops/flash_attn.py +350 -0
- optimum/rbln/ops/kv_cache_update.py +29 -0
- optimum/rbln/ops/linear.py +32 -0
- optimum/rbln/ops/sliding_window_attn.py +111 -0
- optimum/rbln/transformers/__init__.py +340 -0
- optimum/rbln/transformers/configuration_generic.py +120 -0
- optimum/rbln/transformers/modeling_attention_utils.py +385 -0
- optimum/rbln/transformers/modeling_generic.py +280 -0
- optimum/rbln/transformers/modeling_outputs.py +37 -0
- optimum/rbln/transformers/modeling_rope_utils.py +314 -0
- optimum/rbln/transformers/models/__init__.py +343 -0
- optimum/rbln/transformers/models/audio_spectrogram_transformer/__init__.py +17 -0
- optimum/rbln/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +47 -0
- optimum/rbln/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +91 -0
- optimum/rbln/transformers/models/auto/__init__.py +31 -0
- optimum/rbln/transformers/models/auto/auto_factory.py +267 -0
- optimum/rbln/transformers/models/auto/modeling_auto.py +162 -0
- optimum/rbln/transformers/models/bart/__init__.py +17 -0
- optimum/rbln/transformers/models/bart/bart_architecture.py +163 -0
- optimum/rbln/transformers/models/bart/configuration_bart.py +36 -0
- optimum/rbln/transformers/models/bart/modeling_bart.py +86 -0
- optimum/rbln/transformers/models/bert/__init__.py +16 -0
- optimum/rbln/transformers/models/bert/bert_architecture.py +16 -0
- optimum/rbln/transformers/models/bert/configuration_bert.py +46 -0
- optimum/rbln/transformers/models/bert/modeling_bert.py +148 -0
- optimum/rbln/transformers/models/blip_2/__init__.py +20 -0
- optimum/rbln/transformers/models/blip_2/configuration_blip_2.py +115 -0
- optimum/rbln/transformers/models/blip_2/modeling_blip_2.py +526 -0
- optimum/rbln/transformers/models/clip/__init__.py +26 -0
- optimum/rbln/transformers/models/clip/configuration_clip.py +103 -0
- optimum/rbln/transformers/models/clip/modeling_clip.py +384 -0
- optimum/rbln/transformers/models/colpali/__init__.py +2 -0
- optimum/rbln/transformers/models/colpali/colpali_architecture.py +218 -0
- optimum/rbln/transformers/models/colpali/configuration_colpali.py +84 -0
- optimum/rbln/transformers/models/colpali/modeling_colpali.py +361 -0
- optimum/rbln/transformers/models/colqwen2/__init__.py +2 -0
- optimum/rbln/transformers/models/colqwen2/colqwen2_architecture.py +233 -0
- optimum/rbln/transformers/models/colqwen2/configuration_colqwen2.py +74 -0
- optimum/rbln/transformers/models/colqwen2/modeling_colqwen2.py +446 -0
- optimum/rbln/transformers/models/decoderonly/__init__.py +27 -0
- optimum/rbln/transformers/models/decoderonly/configuration_decoderonly.py +300 -0
- optimum/rbln/transformers/models/decoderonly/configuration_lora.py +411 -0
- optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +1224 -0
- optimum/rbln/transformers/models/decoderonly/decoderonly_runtime_utils.py +508 -0
- optimum/rbln/transformers/models/decoderonly/generation_decoderonly.py +119 -0
- optimum/rbln/transformers/models/decoderonly/lora_architecture.py +204 -0
- optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +823 -0
- optimum/rbln/transformers/models/depth_anything/__init__.py +16 -0
- optimum/rbln/transformers/models/depth_anything/configuration_depth_anything.py +24 -0
- optimum/rbln/transformers/models/depth_anything/modeling_depth_anything.py +42 -0
- optimum/rbln/transformers/models/distilbert/__init__.py +19 -0
- optimum/rbln/transformers/models/distilbert/configuration_distilbert.py +24 -0
- optimum/rbln/transformers/models/distilbert/modeling_distilbert.py +51 -0
- optimum/rbln/transformers/models/dpt/__init__.py +16 -0
- optimum/rbln/transformers/models/dpt/configuration_dpt.py +24 -0
- optimum/rbln/transformers/models/dpt/modeling_dpt.py +42 -0
- optimum/rbln/transformers/models/exaone/__init__.py +24 -0
- optimum/rbln/transformers/models/exaone/configuration_exaone.py +42 -0
- optimum/rbln/transformers/models/exaone/exaone_architecture.py +77 -0
- optimum/rbln/transformers/models/exaone/modeling_exaone.py +145 -0
- optimum/rbln/transformers/models/gemma/__init__.py +16 -0
- optimum/rbln/transformers/models/gemma/configuration_gemma.py +50 -0
- optimum/rbln/transformers/models/gemma/gemma_architecture.py +27 -0
- optimum/rbln/transformers/models/gemma/modeling_gemma.py +104 -0
- optimum/rbln/transformers/models/gemma3/__init__.py +16 -0
- optimum/rbln/transformers/models/gemma3/configuration_gemma3.py +109 -0
- optimum/rbln/transformers/models/gemma3/gemma3_architecture.py +170 -0
- optimum/rbln/transformers/models/gemma3/gemma3_runtime_utils.py +245 -0
- optimum/rbln/transformers/models/gemma3/modeling_gemma3.py +611 -0
- optimum/rbln/transformers/models/gpt2/__init__.py +16 -0
- optimum/rbln/transformers/models/gpt2/configuration_gpt2.py +50 -0
- optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +93 -0
- optimum/rbln/transformers/models/gpt2/modeling_gpt2.py +55 -0
- optimum/rbln/transformers/models/grounding_dino/__init__.py +10 -0
- optimum/rbln/transformers/models/grounding_dino/configuration_grounding_dino.py +92 -0
- optimum/rbln/transformers/models/grounding_dino/grounding_dino_architecture.py +599 -0
- optimum/rbln/transformers/models/grounding_dino/modeling_grounding_dino.py +1048 -0
- optimum/rbln/transformers/models/idefics3/__init__.py +16 -0
- optimum/rbln/transformers/models/idefics3/configuration_idefics3.py +89 -0
- optimum/rbln/transformers/models/idefics3/modeling_idefics3.py +497 -0
- optimum/rbln/transformers/models/llama/__init__.py +16 -0
- optimum/rbln/transformers/models/llama/configuration_llama.py +50 -0
- optimum/rbln/transformers/models/llama/llama_architecture.py +19 -0
- optimum/rbln/transformers/models/llama/modeling_llama.py +104 -0
- optimum/rbln/transformers/models/llava/__init__.py +16 -0
- optimum/rbln/transformers/models/llava/configuration_llava.py +72 -0
- optimum/rbln/transformers/models/llava/modeling_llava.py +490 -0
- optimum/rbln/transformers/models/llava_next/__init__.py +16 -0
- optimum/rbln/transformers/models/llava_next/configuration_llava_next.py +69 -0
- optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +493 -0
- optimum/rbln/transformers/models/midm/__init__.py +24 -0
- optimum/rbln/transformers/models/midm/configuration_midm.py +42 -0
- optimum/rbln/transformers/models/midm/midm_architecture.py +144 -0
- optimum/rbln/transformers/models/midm/modeling_midm.py +144 -0
- optimum/rbln/transformers/models/mistral/__init__.py +16 -0
- optimum/rbln/transformers/models/mistral/configuration_mistral.py +50 -0
- optimum/rbln/transformers/models/mistral/mistral_architecture.py +19 -0
- optimum/rbln/transformers/models/mistral/modeling_mistral.py +115 -0
- optimum/rbln/transformers/models/opt/__init__.py +16 -0
- optimum/rbln/transformers/models/opt/configuration_opt.py +29 -0
- optimum/rbln/transformers/models/opt/modeling_opt.py +102 -0
- optimum/rbln/transformers/models/opt/opt_architecture.py +74 -0
- optimum/rbln/transformers/models/pegasus/__init__.py +17 -0
- optimum/rbln/transformers/models/pegasus/configuration_pegasus.py +38 -0
- optimum/rbln/transformers/models/pegasus/modeling_pegasus.py +71 -0
- optimum/rbln/transformers/models/pegasus/pegasus_architecture.py +161 -0
- optimum/rbln/transformers/models/phi/__init__.py +16 -0
- optimum/rbln/transformers/models/phi/configuration_phi.py +50 -0
- optimum/rbln/transformers/models/phi/modeling_phi.py +92 -0
- optimum/rbln/transformers/models/phi/phi_architecture.py +115 -0
- optimum/rbln/transformers/models/pixtral/__init__.py +16 -0
- optimum/rbln/transformers/models/pixtral/configuration_pixtral.py +43 -0
- optimum/rbln/transformers/models/pixtral/modeling_pixtral.py +322 -0
- optimum/rbln/transformers/models/pixtral/pixtral_architecture.py +73 -0
- optimum/rbln/transformers/models/qwen2/__init__.py +16 -0
- optimum/rbln/transformers/models/qwen2/configuration_qwen2.py +50 -0
- optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +123 -0
- optimum/rbln/transformers/models/qwen2/qwen2_architecture.py +19 -0
- optimum/rbln/transformers/models/qwen2_5_vl/__init__.py +19 -0
- optimum/rbln/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +111 -0
- optimum/rbln/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +636 -0
- optimum/rbln/transformers/models/qwen2_5_vl/qwen2_5_vl_architecture.py +220 -0
- optimum/rbln/transformers/models/qwen2_vl/__init__.py +19 -0
- optimum/rbln/transformers/models/qwen2_vl/configuration_qwen2_vl.py +88 -0
- optimum/rbln/transformers/models/qwen2_vl/modeling_qwen2_vl.py +513 -0
- optimum/rbln/transformers/models/qwen2_vl/qwen2_vl_architecture.py +165 -0
- optimum/rbln/transformers/models/qwen3/__init__.py +16 -0
- optimum/rbln/transformers/models/qwen3/configuration_qwen3.py +71 -0
- optimum/rbln/transformers/models/qwen3/modeling_qwen3.py +133 -0
- optimum/rbln/transformers/models/qwen3/qwen3_architecture.py +31 -0
- optimum/rbln/transformers/models/resnet/__init__.py +23 -0
- optimum/rbln/transformers/models/resnet/configuration_resnet.py +42 -0
- optimum/rbln/transformers/models/resnet/modeling_resnet.py +99 -0
- optimum/rbln/transformers/models/roberta/__init__.py +24 -0
- optimum/rbln/transformers/models/roberta/configuration_roberta.py +33 -0
- optimum/rbln/transformers/models/roberta/modeling_roberta.py +72 -0
- optimum/rbln/transformers/models/seq2seq/__init__.py +16 -0
- optimum/rbln/transformers/models/seq2seq/configuration_seq2seq.py +71 -0
- optimum/rbln/transformers/models/seq2seq/modeling_seq2seq.py +477 -0
- optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py +527 -0
- optimum/rbln/transformers/models/siglip/__init__.py +16 -0
- optimum/rbln/transformers/models/siglip/configuration_siglip.py +76 -0
- optimum/rbln/transformers/models/siglip/modeling_siglip.py +199 -0
- optimum/rbln/transformers/models/swin/__init__.py +16 -0
- optimum/rbln/transformers/models/swin/configuration_swin.py +42 -0
- optimum/rbln/transformers/models/swin/modeling_swin.py +354 -0
- optimum/rbln/transformers/models/t5/__init__.py +17 -0
- optimum/rbln/transformers/models/t5/configuration_t5.py +36 -0
- optimum/rbln/transformers/models/t5/modeling_t5.py +130 -0
- optimum/rbln/transformers/models/t5/t5_architecture.py +264 -0
- optimum/rbln/transformers/models/time_series_transformer/__init__.py +26 -0
- optimum/rbln/transformers/models/time_series_transformer/configuration_time_series_transformer.py +41 -0
- optimum/rbln/transformers/models/time_series_transformer/modeling_time_series_transformer.py +435 -0
- optimum/rbln/transformers/models/time_series_transformer/time_series_transformers_architecture.py +337 -0
- optimum/rbln/transformers/models/vit/__init__.py +19 -0
- optimum/rbln/transformers/models/vit/configuration_vit.py +24 -0
- optimum/rbln/transformers/models/vit/modeling_vit.py +44 -0
- optimum/rbln/transformers/models/wav2vec2/__init__.py +16 -0
- optimum/rbln/transformers/models/wav2vec2/configuration_wav2vec2.py +38 -0
- optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +104 -0
- optimum/rbln/transformers/models/whisper/__init__.py +17 -0
- optimum/rbln/transformers/models/whisper/configuration_whisper.py +72 -0
- optimum/rbln/transformers/models/whisper/generation_whisper.py +159 -0
- optimum/rbln/transformers/models/whisper/modeling_whisper.py +475 -0
- optimum/rbln/transformers/models/whisper/whisper_architecture.py +349 -0
- optimum/rbln/transformers/models/xlm_roberta/__init__.py +24 -0
- optimum/rbln/transformers/models/xlm_roberta/configuration_xlm_roberta.py +32 -0
- optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +82 -0
- optimum/rbln/transformers/utils/__init__.py +0 -0
- optimum/rbln/transformers/utils/rbln_quantization.py +589 -0
- optimum/rbln/transformers/utils/rbln_runtime_wrapper.py +79 -0
- optimum/rbln/utils/__init__.py +16 -0
- optimum/rbln/utils/decorator_utils.py +86 -0
- optimum/rbln/utils/deprecation.py +213 -0
- optimum/rbln/utils/hub.py +94 -0
- optimum/rbln/utils/import_utils.py +170 -0
- optimum/rbln/utils/logging.py +110 -0
- optimum/rbln/utils/model_utils.py +63 -0
- optimum/rbln/utils/runtime_utils.py +249 -0
- optimum/rbln/utils/save_utils.py +102 -0
- optimum/rbln/utils/submodule.py +152 -0
- optimum_rbln-0.9.3.post1.dist-info/METADATA +124 -0
- optimum_rbln-0.9.3.post1.dist-info/RECORD +264 -0
- optimum_rbln-0.9.3.post1.dist-info/WHEEL +4 -0
- optimum_rbln-0.9.3.post1.dist-info/entry_points.txt +2 -0
- optimum_rbln-0.9.3.post1.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
# Copyright 2025 Rebellions Inc. All rights reserved.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
from typing import Any, Dict, Optional
|
|
17
|
+
|
|
18
|
+
from diffusers import CosmosVideoToWorldPipeline
|
|
19
|
+
from diffusers.schedulers import EDMEulerScheduler
|
|
20
|
+
from transformers import T5TokenizerFast
|
|
21
|
+
|
|
22
|
+
from ....transformers.models.t5.modeling_t5 import RBLNT5EncoderModel
|
|
23
|
+
from ....utils.logging import get_logger
|
|
24
|
+
from ...modeling_diffusers import RBLNDiffusionMixin
|
|
25
|
+
from ...models.autoencoders.autoencoder_kl_cosmos import RBLNAutoencoderKLCosmos
|
|
26
|
+
from ...models.transformers.transformer_cosmos import RBLNCosmosTransformer3DModel
|
|
27
|
+
from .cosmos_guardrail import RBLNCosmosSafetyChecker
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
# Module-level logger for this pipeline module (project logging helper, not stdlib logging).
logger = get_logger(__name__)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class RBLNCosmosVideoToWorldPipeline(RBLNDiffusionMixin, CosmosVideoToWorldPipeline):
    """
    RBLN-accelerated implementation of Cosmos Video to World pipeline for video-to-video generation.

    This pipeline compiles Cosmos Video to World models to run efficiently on RBLN NPUs, enabling high-performance
    inference for generating videos with distinctive artistic style and enhanced visual quality.
    """

    original_class = CosmosVideoToWorldPipeline
    _submodules = ["text_encoder", "transformer", "vae"]
    _optional_submodules = ["safety_checker"]

    def __init__(
        self,
        text_encoder: RBLNT5EncoderModel,
        tokenizer: T5TokenizerFast,
        transformer: RBLNCosmosTransformer3DModel,
        vae: RBLNAutoencoderKLCosmos,
        scheduler: EDMEulerScheduler,
        safety_checker: Optional[RBLNCosmosSafetyChecker] = None,
    ):
        """Assemble the pipeline from pre-compiled RBLN submodules.

        Args:
            text_encoder: Compiled T5 text encoder.
            tokenizer: HuggingFace T5 tokenizer (runs on host, not compiled).
            transformer: Compiled Cosmos 3D transformer.
            vae: Compiled Cosmos autoencoder.
            scheduler: Diffusers EDM Euler scheduler (runs on host).
            safety_checker: Optional safety checker; a default one is created
                when omitted so the pipeline is never built without one.
        """
        if safety_checker is None:
            safety_checker = RBLNCosmosSafetyChecker()

        super().__init__(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            vae=vae,
            scheduler=scheduler,
            safety_checker=safety_checker,
        )

    def handle_additional_kwargs(self, **kwargs):
        """Drop call-time kwargs that conflict with compile-time settings.

        The transformer is compiled with a fixed ``num_frames`` and
        ``max_seq_len``; differing user-supplied values cannot take effect,
        so they are removed with a warning instead of being passed through.
        """
        if "num_frames" in kwargs and kwargs["num_frames"] != self.transformer.rbln_config.num_frames:
            logger.warning(
                f"The transformer in this pipeline is compiled with 'num_frames={self.transformer.rbln_config.num_frames}'. 'num_frames' set by the user will be ignored"
            )
            kwargs.pop("num_frames")
        if (
            "max_sequence_length" in kwargs
            and kwargs["max_sequence_length"] != self.transformer.rbln_config.max_seq_len
        ):
            logger.warning(
                f"The transformer in this pipeline is compiled with 'max_seq_len={self.transformer.rbln_config.max_seq_len}'. 'max_sequence_length' set by the user will be ignored"
            )
            kwargs.pop("max_sequence_length")
        return kwargs

    @classmethod
    def from_pretrained(
        cls,
        model_id: str,
        *,
        export: bool = False,
        safety_checker: Optional[RBLNCosmosSafetyChecker] = None,
        rbln_config: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ):
        """
        Load a pretrained diffusion pipeline from a model checkpoint, with optional compilation for RBLN NPUs.

        This method has two distinct operating modes:
        - When `export=True`: Takes a PyTorch-based diffusion model, compiles it for RBLN NPUs, and loads the compiled model
        - When `export=False`: Loads an already compiled RBLN model from `model_id` without recompilation

        It supports various diffusion pipelines including Stable Diffusion, Kandinsky, ControlNet, and other diffusers-based models.

        Args:
            model_id (`str`):
                The model ID or path to the pretrained model to load. Can be either:

                - A model ID from the HuggingFace Hub
                - A local path to a saved model directory
            export:
                If True, takes a PyTorch model from `model_id` and compiles it for RBLN NPU execution.
                If False, loads an already compiled RBLN model from `model_id` without recompilation.
            safety_checker:
                Optional custom safety checker to use instead of the default one. Only used when `export=True`.
            rbln_config:
                Configuration options for RBLN compilation. Can include settings for specific submodules
                such as `text_encoder`, `unet`, and `vae`. Configuration can be tailored to the specific
                pipeline being compiled. Defaults to an empty configuration when omitted.
            kwargs:
                Additional arguments to pass to the underlying diffusion pipeline constructor or the
                RBLN compilation process. These may include parameters specific to individual submodules
                or the particular diffusion pipeline being used.
        """
        # Avoid the mutable-default-argument pitfall: a `rbln_config={}` default
        # would be a single dict shared across every call to this classmethod.
        if rbln_config is None:
            rbln_config = {}

        rbln_config, kwargs = cls.get_rbln_config_class().initialize_from_kwargs(rbln_config, **kwargs)
        # Only build a default safety checker when compiling; in load-only mode
        # the checker is restored from the saved artifacts by the superclass.
        if safety_checker is None and export:
            safety_checker = RBLNCosmosSafetyChecker(rbln_config=rbln_config.safety_checker)

        return super().from_pretrained(
            model_id, export=export, safety_checker=safety_checker, rbln_config=rbln_config, **kwargs
        )
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
# Copyright 2024 Rebellions Inc.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from .pipeline_kandinsky2_2 import RBLNKandinskyV22Pipeline
|
|
16
|
+
from .pipeline_kandinsky2_2_combined import (
|
|
17
|
+
RBLNKandinskyV22CombinedPipeline,
|
|
18
|
+
RBLNKandinskyV22Img2ImgCombinedPipeline,
|
|
19
|
+
RBLNKandinskyV22InpaintCombinedPipeline,
|
|
20
|
+
)
|
|
21
|
+
from .pipeline_kandinsky2_2_img2img import RBLNKandinskyV22Img2ImgPipeline
|
|
22
|
+
from .pipeline_kandinsky2_2_inpaint import RBLNKandinskyV22InpaintPipeline
|
|
23
|
+
from .pipeline_kandinsky2_2_prior import RBLNKandinskyV22PriorPipeline
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# Copyright 2024 Rebellions Inc.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from diffusers import KandinskyV22Pipeline
|
|
16
|
+
|
|
17
|
+
from ...configurations import RBLNKandinskyV22PipelineConfig
|
|
18
|
+
from ...modeling_diffusers import RBLNDiffusionMixin
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class RBLNKandinskyV22Pipeline(RBLNDiffusionMixin, KandinskyV22Pipeline):
    """
    RBLN-accelerated implementation of Kandinsky 2.2 pipeline for text-to-image generation.

    This pipeline compiles Kandinsky 2.2 models to run efficiently on RBLN NPUs, enabling high-performance
    inference for generating images with distinctive artistic style and enhanced visual quality.
    """

    # Hook points consumed by RBLNDiffusionMixin during compile/load.
    original_class = KandinskyV22Pipeline
    _rbln_config_class = RBLNKandinskyV22PipelineConfig
    _submodules = ["unet", "movq"]

    def get_compiled_image_size(self):
        """Return the image size the compiled movq decoder was built for."""
        compiled_size = self.movq.image_size
        return compiled_size
|
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
# Copyright 2024 Rebellions Inc.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from diffusers import (
|
|
16
|
+
DDPMScheduler,
|
|
17
|
+
KandinskyV22CombinedPipeline,
|
|
18
|
+
KandinskyV22Img2ImgCombinedPipeline,
|
|
19
|
+
KandinskyV22InpaintCombinedPipeline,
|
|
20
|
+
PriorTransformer,
|
|
21
|
+
UnCLIPScheduler,
|
|
22
|
+
UNet2DConditionModel,
|
|
23
|
+
VQModel,
|
|
24
|
+
)
|
|
25
|
+
from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection
|
|
26
|
+
|
|
27
|
+
from ...configurations import RBLNKandinskyV22CombinedPipelineConfig
|
|
28
|
+
from ...modeling_diffusers import RBLNDiffusionMixin
|
|
29
|
+
from .pipeline_kandinsky2_2 import RBLNKandinskyV22Pipeline
|
|
30
|
+
from .pipeline_kandinsky2_2_img2img import RBLNKandinskyV22Img2ImgPipeline
|
|
31
|
+
from .pipeline_kandinsky2_2_inpaint import RBLNKandinskyV22InpaintPipeline
|
|
32
|
+
from .pipeline_kandinsky2_2_prior import RBLNKandinskyV22PriorPipeline
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class RBLNKandinskyV22CombinedPipeline(RBLNDiffusionMixin, KandinskyV22CombinedPipeline):
    """
    RBLN-accelerated implementation of Kandinsky 2.2 combined pipeline for end-to-end text-to-image generation.

    This pipeline compiles both prior and decoder Kandinsky 2.2 models to run efficiently on RBLN NPUs, enabling
    high-performance inference for complete text-to-image generation with distinctive artistic style.
    """

    original_class = KandinskyV22CombinedPipeline
    _rbln_config_class = RBLNKandinskyV22CombinedPipelineConfig
    _connected_classes = {"prior_pipe": RBLNKandinskyV22PriorPipeline, "decoder_pipe": RBLNKandinskyV22Pipeline}
    _submodules = ["prior_image_encoder", "prior_text_encoder", "prior_prior", "unet", "movq"]
    _prefix = {"prior_pipe": "prior_"}

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
        prior_prior: PriorTransformer,
        prior_image_encoder: CLIPVisionModelWithProjection,
        prior_text_encoder: CLIPTextModelWithProjection,
        prior_tokenizer: CLIPTokenizer,
        prior_scheduler: UnCLIPScheduler,
        prior_image_processor: CLIPImageProcessor,
    ):
        # Two-phase init, order matters: set up the mixin state first, then
        # start the cooperative __init__ chain *past* KandinskyV22CombinedPipeline
        # so its own __init__ is not re-run for the RBLN-compiled modules.
        RBLNDiffusionMixin.__init__(self)
        super(KandinskyV22CombinedPipeline, self).__init__()

        # Register every component under the combined pipeline's naming scheme
        # (prior_* prefix for the prior stage).
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
            "prior_prior": prior_prior,
            "prior_image_encoder": prior_image_encoder,
            "prior_text_encoder": prior_text_encoder,
            "prior_tokenizer": prior_tokenizer,
            "prior_scheduler": prior_scheduler,
            "prior_image_processor": prior_image_processor,
        }
        self.register_modules(**components)

        # Expose the two stages as nested RBLN pipelines sharing the same modules.
        self.prior_pipe = RBLNKandinskyV22PriorPipeline(
            prior=prior_prior,
            image_encoder=prior_image_encoder,
            text_encoder=prior_text_encoder,
            tokenizer=prior_tokenizer,
            scheduler=prior_scheduler,
            image_processor=prior_image_processor,
        )
        self.decoder_pipe = RBLNKandinskyV22Pipeline(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )

    def get_compiled_image_size(self):
        """Return the image size the compiled movq decoder was built for."""
        return self.movq.image_size
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
class RBLNKandinskyV22Img2ImgCombinedPipeline(RBLNDiffusionMixin, KandinskyV22Img2ImgCombinedPipeline):
    """RBLN-accelerated Kandinsky 2.2 combined pipeline for image-to-image generation.

    Chains a prior stage (text/image -> image embeddings) with a decoder stage
    (embeddings -> image); both stages run as RBLN-compiled sub-pipelines on
    RBLN NPUs.
    """

    original_class = KandinskyV22Img2ImgCombinedPipeline
    # NOTE(review): the text-to-image combined pipeline also assigns
    # _rbln_config_class; confirm whether an equivalent config class should
    # be set here as well.
    _connected_classes = {"prior_pipe": RBLNKandinskyV22PriorPipeline, "decoder_pipe": RBLNKandinskyV22Img2ImgPipeline}
    _submodules = ["prior_image_encoder", "prior_text_encoder", "prior_prior", "unet", "movq"]
    _prefix = {"prior_pipe": "prior_"}

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
        prior_prior: PriorTransformer,
        prior_image_encoder: CLIPVisionModelWithProjection,
        prior_text_encoder: CLIPTextModelWithProjection,
        prior_tokenizer: CLIPTokenizer,
        prior_scheduler: UnCLIPScheduler,
        prior_image_processor: CLIPImageProcessor,
    ):
        # Initialize the RBLN mixin explicitly, then jump past the immediate
        # parent: super() targets KandinskyV22Img2ImgCombinedPipeline's base,
        # presumably to skip the original combined __init__'s own module
        # wiring — the sub-pipelines are assembled below instead.
        RBLNDiffusionMixin.__init__(self)
        super(KandinskyV22Img2ImgCombinedPipeline, self).__init__()

        modules = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
            "prior_prior": prior_prior,
            "prior_image_encoder": prior_image_encoder,
            "prior_text_encoder": prior_text_encoder,
            "prior_tokenizer": prior_tokenizer,
            "prior_scheduler": prior_scheduler,
            "prior_image_processor": prior_image_processor,
        }
        self.register_modules(**modules)

        # Wire the two RBLN sub-pipelines from the modules registered above.
        self.prior_pipe = RBLNKandinskyV22PriorPipeline(
            prior=prior_prior,
            image_encoder=prior_image_encoder,
            text_encoder=prior_text_encoder,
            tokenizer=prior_tokenizer,
            scheduler=prior_scheduler,
            image_processor=prior_image_processor,
        )
        self.decoder_pipe = RBLNKandinskyV22Img2ImgPipeline(unet=unet, scheduler=scheduler, movq=movq)

    def get_compiled_image_size(self):
        """Return the image size of the movq submodule (the compiled size)."""
        return self.movq.image_size
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
class RBLNKandinskyV22InpaintCombinedPipeline(RBLNDiffusionMixin, KandinskyV22InpaintCombinedPipeline):
    """RBLN-accelerated Kandinsky 2.2 combined pipeline for image inpainting.

    Chains a prior stage (text/image -> image embeddings) with an inpainting
    decoder stage (embeddings + masked image -> image); both stages run as
    RBLN-compiled sub-pipelines on RBLN NPUs.
    """

    original_class = KandinskyV22InpaintCombinedPipeline
    # NOTE(review): the text-to-image combined pipeline also assigns
    # _rbln_config_class; confirm whether an equivalent config class should
    # be set here as well.
    _connected_classes = {"prior_pipe": RBLNKandinskyV22PriorPipeline, "decoder_pipe": RBLNKandinskyV22InpaintPipeline}
    _submodules = ["prior_image_encoder", "prior_text_encoder", "prior_prior", "unet", "movq"]
    _prefix = {"prior_pipe": "prior_"}

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
        prior_prior: PriorTransformer,
        prior_image_encoder: CLIPVisionModelWithProjection,
        prior_text_encoder: CLIPTextModelWithProjection,
        prior_tokenizer: CLIPTokenizer,
        prior_scheduler: UnCLIPScheduler,
        prior_image_processor: CLIPImageProcessor,
    ):
        # Initialize the RBLN mixin explicitly, then jump past the immediate
        # parent: super() targets KandinskyV22InpaintCombinedPipeline's base,
        # presumably to skip the original combined __init__'s own module
        # wiring — the sub-pipelines are assembled below instead.
        RBLNDiffusionMixin.__init__(self)
        super(KandinskyV22InpaintCombinedPipeline, self).__init__()

        modules = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
            "prior_prior": prior_prior,
            "prior_image_encoder": prior_image_encoder,
            "prior_text_encoder": prior_text_encoder,
            "prior_tokenizer": prior_tokenizer,
            "prior_scheduler": prior_scheduler,
            "prior_image_processor": prior_image_processor,
        }
        self.register_modules(**modules)

        # Wire the two RBLN sub-pipelines from the modules registered above.
        self.prior_pipe = RBLNKandinskyV22PriorPipeline(
            prior=prior_prior,
            image_encoder=prior_image_encoder,
            text_encoder=prior_text_encoder,
            tokenizer=prior_tokenizer,
            scheduler=prior_scheduler,
            image_processor=prior_image_processor,
        )
        self.decoder_pipe = RBLNKandinskyV22InpaintPipeline(unet=unet, scheduler=scheduler, movq=movq)

    def get_compiled_image_size(self):
        """Return the image size of the movq submodule (the compiled size)."""
        return self.movq.image_size
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# Copyright 2024 Rebellions Inc.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from diffusers import KandinskyV22Img2ImgPipeline
|
|
16
|
+
|
|
17
|
+
from ...configurations import RBLNKandinskyV22Img2ImgPipelineConfig
|
|
18
|
+
from ...modeling_diffusers import RBLNDiffusionMixin
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class RBLNKandinskyV22Img2ImgPipeline(RBLNDiffusionMixin, KandinskyV22Img2ImgPipeline):
    """RBLN-accelerated Kandinsky 2.2 pipeline for image-to-image generation.

    Wraps the diffusers ``KandinskyV22Img2ImgPipeline`` so that its ``unet``
    and ``movq`` submodules are compiled for and executed on RBLN NPUs.
    """

    original_class = KandinskyV22Img2ImgPipeline
    _rbln_config_class = RBLNKandinskyV22Img2ImgPipelineConfig
    _submodules = ["unet", "movq"]

    def get_compiled_image_size(self):
        """Return the image size of the movq submodule (the compiled size)."""
        return self.movq.image_size
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# Copyright 2024 Rebellions Inc.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from diffusers import KandinskyV22InpaintPipeline
|
|
16
|
+
|
|
17
|
+
from ...configurations import RBLNKandinskyV22InpaintPipelineConfig
|
|
18
|
+
from ...modeling_diffusers import RBLNDiffusionMixin
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class RBLNKandinskyV22InpaintPipeline(RBLNDiffusionMixin, KandinskyV22InpaintPipeline):
    """RBLN-accelerated Kandinsky 2.2 pipeline for image inpainting.

    Wraps the diffusers ``KandinskyV22InpaintPipeline`` so that its ``unet``
    and ``movq`` submodules are compiled for and executed on RBLN NPUs.
    """

    original_class = KandinskyV22InpaintPipeline
    _rbln_config_class = RBLNKandinskyV22InpaintPipelineConfig
    _submodules = ["unet", "movq"]

    def get_compiled_image_size(self):
        """Return the image size of the movq submodule (the compiled size)."""
        return self.movq.image_size
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
# Copyright 2024 Rebellions Inc.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from diffusers import KandinskyV22PriorPipeline
|
|
16
|
+
|
|
17
|
+
from ...configurations import RBLNKandinskyV22PriorPipelineConfig
|
|
18
|
+
from ...modeling_diffusers import RBLNDiffusionMixin
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class RBLNKandinskyV22PriorPipeline(RBLNDiffusionMixin, KandinskyV22PriorPipeline):
    """RBLN-accelerated Kandinsky 2.2 prior pipeline.

    Wraps the diffusers ``KandinskyV22PriorPipeline`` so that its
    ``text_encoder``, ``image_encoder``, and ``prior`` submodules are compiled
    for and executed on RBLN NPUs, producing image embeddings for the
    downstream decoder pipelines.
    """

    original_class = KandinskyV22PriorPipeline
    _rbln_config_class = RBLNKandinskyV22PriorPipelineConfig
    _submodules = ["text_encoder", "image_encoder", "prior"]
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
# Copyright 2025 Rebellions Inc. All rights reserved.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from .pipeline_stable_diffusion import RBLNStableDiffusionPipeline
|
|
16
|
+
from .pipeline_stable_diffusion_img2img import RBLNStableDiffusionImg2ImgPipeline
|
|
17
|
+
from .pipeline_stable_diffusion_inpaint import RBLNStableDiffusionInpaintPipeline
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# Copyright 2025 Rebellions Inc. All rights reserved.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
from diffusers import StableDiffusionPipeline
|
|
17
|
+
|
|
18
|
+
from ...configurations import RBLNStableDiffusionPipelineConfig
|
|
19
|
+
from ...modeling_diffusers import RBLNDiffusionMixin
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class RBLNStableDiffusionPipeline(RBLNDiffusionMixin, StableDiffusionPipeline):
    """RBLN-accelerated Stable Diffusion pipeline for text-to-image generation.

    Wraps the diffusers ``StableDiffusionPipeline`` so that its ``vae``,
    ``text_encoder``, and ``unet`` submodules are compiled for and executed
    on RBLN NPUs.
    """

    original_class = StableDiffusionPipeline
    _rbln_config_class = RBLNStableDiffusionPipelineConfig
    _submodules = ["vae", "text_encoder", "unet"]
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
# Copyright 2025 Rebellions Inc. All rights reserved.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from diffusers import StableDiffusionImg2ImgPipeline
|
|
16
|
+
|
|
17
|
+
from ...configurations import RBLNStableDiffusionImg2ImgPipelineConfig
|
|
18
|
+
from ...modeling_diffusers import RBLNDiffusionMixin
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class RBLNStableDiffusionImg2ImgPipeline(RBLNDiffusionMixin, StableDiffusionImg2ImgPipeline):
    """RBLN-accelerated Stable Diffusion pipeline for image-to-image generation.

    Wraps the diffusers ``StableDiffusionImg2ImgPipeline`` so that its
    ``text_encoder``, ``unet``, and ``vae`` submodules are compiled for and
    executed on RBLN NPUs.
    """

    original_class = StableDiffusionImg2ImgPipeline
    _rbln_config_class = RBLNStableDiffusionImg2ImgPipelineConfig
    _submodules = ["text_encoder", "unet", "vae"]
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
# Copyright 2025 Rebellions Inc. All rights reserved.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from diffusers import StableDiffusionInpaintPipeline
|
|
16
|
+
|
|
17
|
+
from ...configurations import RBLNStableDiffusionInpaintPipelineConfig
|
|
18
|
+
from ...modeling_diffusers import RBLNDiffusionMixin
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class RBLNStableDiffusionInpaintPipeline(RBLNDiffusionMixin, StableDiffusionInpaintPipeline):
    """RBLN-accelerated Stable Diffusion pipeline for image inpainting.

    Wraps the diffusers ``StableDiffusionInpaintPipeline`` so that its
    ``text_encoder``, ``unet``, and ``vae`` submodules are compiled for and
    executed on RBLN NPUs.
    """

    original_class = StableDiffusionInpaintPipeline
    _rbln_config_class = RBLNStableDiffusionInpaintPipelineConfig
    _submodules = ["text_encoder", "unet", "vae"]
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
# Copyright 2025 Rebellions Inc. All rights reserved.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from .pipeline_stable_diffusion_3 import RBLNStableDiffusion3Pipeline
|
|
16
|
+
from .pipeline_stable_diffusion_3_img2img import RBLNStableDiffusion3Img2ImgPipeline
|
|
17
|
+
from .pipeline_stable_diffusion_3_inpaint import RBLNStableDiffusion3InpaintPipeline
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
# Copyright 2025 Rebellions Inc. All rights reserved.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at:
|
|
6
|
+
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
from diffusers import StableDiffusion3Pipeline
|
|
16
|
+
|
|
17
|
+
from ...configurations import RBLNStableDiffusion3PipelineConfig
|
|
18
|
+
from ...modeling_diffusers import RBLNDiffusionMixin
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class RBLNStableDiffusion3Pipeline(RBLNDiffusionMixin, StableDiffusion3Pipeline):
    """RBLN-accelerated Stable Diffusion 3 pipeline for text-to-image generation.

    Wraps the diffusers ``StableDiffusion3Pipeline`` so that its
    ``transformer``, three text encoders, and ``vae`` submodules are compiled
    for and executed on RBLN NPUs.
    """

    original_class = StableDiffusion3Pipeline
    _rbln_config_class = RBLNStableDiffusion3PipelineConfig
    _submodules = ["transformer", "text_encoder_3", "text_encoder", "text_encoder_2", "vae"]
|