optimum-rbln 0.7.4a7__tar.gz → 0.7.4a9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/ISSUE_TEMPLATE/model_request.md +1 -1
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/scripts/auto_code_review.py +1 -1
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/PKG-INFO +2 -2
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/pyproject.toml +1 -1
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/__version__.py +2 -2
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/configuration_utils.py +46 -2
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/models/configuration_controlnet.py +8 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/models/configuration_prior_transformer.py +8 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/models/configuration_transformer_sd3.py +8 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/models/configuration_unet_2d_condition.py +8 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_controlnet.py +32 -17
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_kandinsky2_2.py +19 -15
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion.py +8 -8
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_3.py +8 -8
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_xl.py +8 -8
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/modeling_diffusers.py +50 -17
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +2 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/modeling_base.py +4 -3
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/auto/auto_factory.py +3 -3
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/utils/hub.py +2 -2
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/utils/model_utils.py +4 -4
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/tests/test_base.py +16 -10
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/tests/test_diffusers.py +1 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/tests/test_llm.py +10 -1
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/ISSUE_TEMPLATE/config.yml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/pull_request_template.md +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/scripts/validate_pr_checklist.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/version.yaml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/auto_code_review.yml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/check_code_quality.yml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/deploy-on-tag.yaml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/deploy.yaml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/pr-title-check.yaml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/pr_checklist_validator.yml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/rbln_check_compiler.yaml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/rbln_dispatch_pytest.yaml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/rbln_optimum_inference_test.yaml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/rbln_optimum_pytest.yaml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/rbln_scheduled_test.yaml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.github/workflows/rbln_trigger_on_pr.yaml +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/.gitignore +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/CODE_OF_CONDUCT.md +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/CONTRIBUTING.md +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/LICENSE +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/README.md +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/assets/rbln_logo.png +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/audio-classification/run_ast_audio_classification.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/depth-estimation/run_dpt.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/image-classification/run_image_classification.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/image-classification/run_vit_image_classification.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/image-to-text/run_idefics3.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/image-to-text/run_llava_next_image_to_text.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/kandinsky2_2/run_kandinsky2_2.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/kandinsky2_2/run_kandinsky2_2_combined.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/kandinsky2_2/run_kandinsky2_2_img2img.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/kandinsky2_2/run_kandinsky2_2_img2img_combined.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/kandinsky2_2/run_kandinsky2_2_inpaint.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/kandinsky2_2/run_kandinsky2_2_inpaint_combined.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/kandinsky2_2/run_kandinsky2_2_prior_interpolate.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/question-answering/run_question_answering.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/speech-recognition/run_wav2vec2.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/speech-recognition/run_whisper.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/stable-diffusion/run_stable_diffusion.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/stable-diffusion/run_stable_diffusion_controlnet.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/stable-diffusion/run_stable_diffusion_img2img.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/stable-diffusion/run_stable_diffusion_img2img_controlnet.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/stable-diffusion/run_stable_diffusion_inpaint.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/stable-diffusion/run_stable_diffusion_lora.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/stable-diffusion/run_stable_diffusion_multicontrolnet.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/text-classification/run_bge_m3_text_classification.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/text-classification/run_bge_reranker_v2_m3_text_classification.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/text-classification/run_secureBERT.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/text-classification/run_t5_classification.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/text-classification/run_twitter_roberta_text_classification.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/text2text-generation/run_bart_text2text_generation.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/text2text-generation/run_llama_peft.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/text2text-generation/run_llama_text2text_generation.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/examples/time-series-forecasting/run_time_series_forecasting.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/scripts/uv-lock.sh +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/scripts/uv-sync.sh +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/models/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/models/configuration_vq_model.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/configurations/pipelines/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/autoencoders/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/autoencoders/autoencoder_kl.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/autoencoders/vae.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/autoencoders/vq_model.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/controlnet.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/transformers/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/transformers/prior_transformer.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/transformers/transformer_sd3.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/unets/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/models/unets/unet_2d_condition.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/controlnet/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/modeling.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/ops/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/ops/attn.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/ops/flash_attn.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/ops/kv_cache_update.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/ops/linear.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/configuration_alias.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/configuration_generic.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/modeling_alias.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/modeling_generic.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/modeling_rope_utils.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/auto/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/auto/modeling_auto.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/bart/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/bart/bart_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/bart/configuration_bart.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/bart/modeling_bart.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/bert/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/bert/configuration_bert.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/bert/modeling_bert.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/clip/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/clip/configuration_clip.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/clip/modeling_clip.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/decoderonly/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/decoderonly/configuration_decoderonly.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/dpt/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/dpt/configuration_dpt.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/dpt/modeling_dpt.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/exaone/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/exaone/configuration_exaone.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/exaone/exaone_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/exaone/modeling_exaone.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/gemma/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/gemma/configuration_gemma.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/gemma/gemma_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/gemma/modeling_gemma.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/gpt2/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/gpt2/configuration_gpt2.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/gpt2/modeling_gpt2.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/idefics3/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/idefics3/configuration_idefics3.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/idefics3/modeling_idefics3.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/llama/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/llama/configuration_llama.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/llama/llama_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/llama/modeling_llama.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/llava_next/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/llava_next/configuration_llava_next.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/midm/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/midm/configuration_midm.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/midm/midm_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/midm/modeling_midm.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/mistral/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/mistral/configuration_mistral.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/mistral/mistral_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/mistral/modeling_mistral.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/phi/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/phi/configuration_phi.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/phi/modeling_phi.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/phi/phi_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/qwen2/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/qwen2/configuration_qwen2.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/qwen2/qwen2_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/qwen2_5_vl/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/qwen2_5_vl/qwen2_5_vl_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/seq2seq/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/seq2seq/configuration_seq2seq2.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/seq2seq/modeling_seq2seq.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/t5/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/t5/configuration_t5.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/t5/modeling_t5.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/t5/t5_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/time_series_transformers/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/time_series_transformers/configuration_time_series_transformer.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/time_series_transformers/modeling_time_series_transformers.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/time_series_transformers/time_series_transformers_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/wav2vec2/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/wav2vec2/configuration_wav2vec.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/whisper/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/whisper/configuration_whisper.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/whisper/generation_whisper.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/whisper/modeling_whisper.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/whisper/whisper_architecture.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/xlm_roberta/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/xlm_roberta/configuration_xlm_roberta.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/utils/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/transformers/utils/rbln_quantization.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/utils/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/utils/decorator_utils.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/utils/import_utils.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/utils/logging.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/utils/runtime_utils.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/utils/save_utils.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/src/optimum/rbln/utils/submodule.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/tests/__init__.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/tests/psnr.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/tests/requirements_sdxl.txt +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/tests/run_stable_diffusion_xl_base.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/tests/test_config.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/tests/test_transformers.py +0 -0
- {optimum_rbln-0.7.4a7 → optimum_rbln-0.7.4a9}/uv.lock +0 -0
.github/scripts/auto_code_review.py

```diff
@@ -36,7 +36,7 @@ def get_pr_diff():
 
 
 def get_prompt(diff, pr):
-    system_prompt = """You are an experienced software engineer specializing in code reviews for deep learning libraries. Your task is to review code changes and related pull request (PR) information for `optimum-rbln`, a Python library that optimizes
+    system_prompt = """You are an experienced software engineer specializing in code reviews for deep learning libraries. Your task is to review code changes and related pull request (PR) information for `optimum-rbln`, a Python library that optimizes HuggingFace models for execution on RBLN NPUs.
 
 Focus on providing actionable and constructive feedback. Don't make generalized suggestions."""
 
```
PKG-INFO

```diff
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: optimum-rbln
-Version: 0.7.4a7
-Summary: Optimum RBLN is the interface between the
+Version: 0.7.4a9
+Summary: Optimum RBLN is the interface between the HuggingFace Transformers and Diffusers libraries and RBLN accelerators. It provides a set of tools enabling easy model loading and inference on single and multiple rbln device settings for different downstream tasks.
 Project-URL: Homepage, https://rebellions.ai
 Project-URL: Documentation, https://docs.rbln.ai
 Project-URL: Repository, https://github.com/rebellions-sw/optimum-rbln
```
pyproject.toml

```diff
@@ -1,7 +1,7 @@
 [project]
 name = "optimum-rbln"
 description = """
-Optimum RBLN is the interface between the
+Optimum RBLN is the interface between the HuggingFace Transformers and Diffusers libraries and RBLN accelerators.
 It provides a set of tools enabling easy model loading and inference on single and multiple rbln device settings for different downstream tasks.
 """
 authors = [
```
src/optimum/rbln/__version__.py

```diff
@@ -17,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.7.4a7'
-__version_tuple__ = version_tuple = (0, 7, 4, 'a7')
+__version__ = version = '0.7.4a9'
+__version_tuple__ = version_tuple = (0, 7, 4, 'a9')
```
src/optimum/rbln/configuration_utils.py

```diff
@@ -174,6 +174,14 @@ class RBLNAutoConfig:
         cls = getattr(importlib.import_module("optimum.rbln"), cls_name)
         return cls(**kwargs)
 
+    @staticmethod
+    def load_from_dict(config_dict: Dict[str, Any]) -> "RBLNModelConfig":
+        cls_name = config_dict.get("cls_name")
+        if cls_name is None:
+            raise ValueError("`cls_name` is required.")
+        cls = getattr(importlib.import_module("optimum.rbln"), cls_name)
+        return cls(**config_dict)
+
     @staticmethod
     def load(
         path: str,
@@ -195,8 +203,9 @@ class RBLNAutoConfig:
         cls, config_file = load_config(path)
 
         rbln_keys = [key for key in kwargs.keys() if key.startswith("rbln_")]
-
         rbln_runtime_kwargs = {key[5:]: kwargs.pop(key) for key in rbln_keys if key[5:] in RUNTIME_KEYWORDS}
+        rbln_submodule_kwargs = {key[5:]: kwargs.pop(key) for key in rbln_keys if key[5:] in cls.submodules}
+
         rbln_kwargs = {
             key[5:]: kwargs.pop(key)
             for key in rbln_keys
@@ -206,6 +215,14 @@ class RBLNAutoConfig:
         if len(rbln_kwargs) > 0:
             raise ValueError(f"Cannot set the following arguments: {list(rbln_kwargs.keys())}")
 
+        # Process submodule's rbln_config
+        for submodule in cls.submodules:
+            if submodule not in config_file:
+                raise ValueError(f"Submodule {submodule} not found in rbln_config.json.")
+            submodule_config = config_file[submodule]
+            submodule_config.update(rbln_submodule_kwargs.pop(submodule, {}))
+            config_file[submodule] = RBLNAutoConfig.load_from_dict(submodule_config)
+
         if passed_rbln_config is not None:
             config_file.update(passed_rbln_config._runtime_options)
             # TODO(jongho): Reject if the passed_rbln_config has different attributes from the config_file
@@ -435,6 +452,7 @@ class RBLNModelConfig:
         "activate_profiler",
     ]
     submodules: List[str] = []
+    subclass_non_save_attributes = []
 
     def init_submodule_config(
         self,
@@ -463,7 +481,11 @@ class RBLNModelConfig:
         return submodule_config
 
     def __setattr__(self, key, value):
-        if
+        if (
+            key != "_attributes_map"
+            and key not in self.non_save_attributes
+            and key not in self.subclass_non_save_attributes
+        ):
             self._attributes_map[key] = value
 
         if hasattr(self, "_frozen") and self._frozen:
@@ -706,6 +728,28 @@ class RBLNModelConfig:
 
         return rbln_config, kwargs
 
+    def get_default_values_for_original_cls(self, func_name: str, keys: List[str]) -> Dict[str, Any]:
+        """
+        Get default values for original class attributes from RBLNModelConfig.
+
+        Args:
+            func_name (str): The name of the function to get the default values for.
+            keys (List[str]): The keys of the attributes to get.
+
+        Returns:
+            Dict[str, Any]: The default values for the attributes.
+        """
+        model_cls = self.rbln_model_cls.get_hf_class()
+        func = getattr(model_cls, func_name)
+        func_signature = inspect.signature(func)
+        default_values = {}
+        for key in keys:
+            if key in func_signature.parameters:
+                default_values[key] = func_signature.parameters[key].default
+            else:
+                raise ValueError(f"Default value for `{key}` is not set for the model class.")
+        return default_values
+
     @property
     def create_runtimes(self):
         context = ContextRblnConfig.get_current_context()["create_runtimes"]
```
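The new `RBLNAutoConfig.load_from_dict` shown above resolves the `cls_name` recorded in a serialized config dict back to a concrete config class and re-instantiates it, which is how the per-submodule sections of `rbln_config.json` become typed config objects inside `load`. The sketch below is a minimal, self-contained illustration of that dispatch pattern; `ToyUNetConfig` and the dict registry are hypothetical stand-ins (the real method resolves classes from the `optimum.rbln` module via `importlib`).

```python
from dataclasses import dataclass
from typing import Any, Dict, Type


@dataclass
class ToyUNetConfig:
    cls_name: str = "ToyUNetConfig"
    batch_size: int = 1
    sample_size: int = 64


# Toy registry standing in for getattr(importlib.import_module("optimum.rbln"), cls_name).
REGISTRY: Dict[str, Type] = {"ToyUNetConfig": ToyUNetConfig}


def load_from_dict(config_dict: Dict[str, Any]):
    # The serialized dict records which config class produced it; loading simply
    # re-instantiates that class from the same keys.
    cls_name = config_dict.get("cls_name")
    if cls_name is None:
        raise ValueError("`cls_name` is required.")
    return REGISTRY[cls_name](**config_dict)


saved = {"cls_name": "ToyUNetConfig", "batch_size": 2, "sample_size": 96}
print(load_from_dict(saved))  # ToyUNetConfig(cls_name='ToyUNetConfig', batch_size=2, sample_size=96)
```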
src/optimum/rbln/diffusers/configurations/models/configuration_controlnet.py

```diff
@@ -18,6 +18,8 @@ from ....configuration_utils import RBLNModelConfig
 
 
 class RBLNControlNetModelConfig(RBLNModelConfig):
+    subclass_non_save_attributes = ["_batch_size_is_specified"]
+
     def __init__(
         self,
         batch_size: Optional[int] = None,
@@ -44,6 +46,8 @@ class RBLNControlNetModelConfig(RBLNModelConfig):
             ValueError: If batch_size is not a positive integer.
         """
         super().__init__(**kwargs)
+        self._batch_size_is_specified = batch_size is not None
+
         self.batch_size = batch_size or 1
         if not isinstance(self.batch_size, int) or self.batch_size < 0:
             raise ValueError(f"batch_size must be a positive integer, got {self.batch_size}")
@@ -52,3 +56,7 @@ class RBLNControlNetModelConfig(RBLNModelConfig):
         self.unet_sample_size = unet_sample_size
         self.vae_sample_size = vae_sample_size
         self.text_model_hidden_size = text_model_hidden_size
+
+    @property
+    def batch_size_is_specified(self):
+        return self._batch_size_is_specified
```
src/optimum/rbln/diffusers/configurations/models/configuration_prior_transformer.py

```diff
@@ -18,6 +18,8 @@ from ....configuration_utils import RBLNModelConfig
 
 
 class RBLNPriorTransformerConfig(RBLNModelConfig):
+    subclass_non_save_attributes = ["_batch_size_is_specified"]
+
     def __init__(
         self,
         batch_size: Optional[int] = None,
@@ -36,9 +38,15 @@ class RBLNPriorTransformerConfig(RBLNModelConfig):
             ValueError: If batch_size is not a positive integer.
         """
         super().__init__(**kwargs)
+        self._batch_size_is_specified = batch_size is not None
+
         self.batch_size = batch_size or 1
         if not isinstance(self.batch_size, int) or self.batch_size < 0:
             raise ValueError(f"batch_size must be a positive integer, got {self.batch_size}")
 
         self.embedding_dim = embedding_dim
         self.num_embeddings = num_embeddings
+
+    @property
+    def batch_size_is_specified(self):
+        return self._batch_size_is_specified
```
src/optimum/rbln/diffusers/configurations/models/configuration_transformer_sd3.py

```diff
@@ -18,6 +18,8 @@ from ....configuration_utils import RBLNModelConfig
 
 
 class RBLNSD3Transformer2DModelConfig(RBLNModelConfig):
+    subclass_non_save_attributes = ["_batch_size_is_specified"]
+
     def __init__(
         self,
         batch_size: Optional[int] = None,
@@ -38,6 +40,8 @@ class RBLNSD3Transformer2DModelConfig(RBLNModelConfig):
             ValueError: If batch_size is not a positive integer.
         """
         super().__init__(**kwargs)
+        self._batch_size_is_specified = batch_size is not None
+
         self.batch_size = batch_size or 1
         if not isinstance(self.batch_size, int) or self.batch_size < 0:
             raise ValueError(f"batch_size must be a positive integer, got {self.batch_size}")
@@ -46,3 +50,7 @@ class RBLNSD3Transformer2DModelConfig(RBLNModelConfig):
         self.sample_size = sample_size
         if isinstance(self.sample_size, int):
             self.sample_size = (self.sample_size, self.sample_size)
+
+    @property
+    def batch_size_is_specified(self):
+        return self._batch_size_is_specified
```
src/optimum/rbln/diffusers/configurations/models/configuration_unet_2d_condition.py

```diff
@@ -18,6 +18,8 @@ from ....configuration_utils import RBLNModelConfig
 
 
 class RBLNUNet2DConditionModelConfig(RBLNModelConfig):
+    subclass_non_save_attributes = ["_batch_size_is_specified"]
+
     def __init__(
         self,
         batch_size: Optional[int] = None,
@@ -49,6 +51,8 @@ class RBLNUNet2DConditionModelConfig(RBLNModelConfig):
             ValueError: If batch_size is not a positive integer.
         """
         super().__init__(**kwargs)
+        self._batch_size_is_specified = batch_size is not None
+
         self.batch_size = batch_size or 1
         if not isinstance(self.batch_size, int) or self.batch_size < 0:
             raise ValueError(f"batch_size must be a positive integer, got {self.batch_size}")
@@ -64,3 +68,7 @@ class RBLNUNet2DConditionModelConfig(RBLNModelConfig):
         self.sample_size = sample_size
         if isinstance(sample_size, int):
             self.sample_size = (sample_size, sample_size)
+
+    @property
+    def batch_size_is_specified(self):
+        return self._batch_size_is_specified
```
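All four model configs above add the same idiom: record at construction time whether the caller actually passed `batch_size`, keep that flag out of the saved attributes via `subclass_non_save_attributes`, and expose it as the read-only `batch_size_is_specified` property so that pipeline configs only adjust batch sizes that were never pinned. A minimal sketch of the idiom with a hypothetical toy class (not one of the RBLN config classes):

```python
from typing import Optional


class ToySubmoduleConfig:
    def __init__(self, batch_size: Optional[int] = None):
        # Remember whether the user pinned a batch size before applying the default.
        self._batch_size_is_specified = batch_size is not None
        self.batch_size = batch_size or 1

    @property
    def batch_size_is_specified(self) -> bool:
        return self._batch_size_is_specified


unet = ToySubmoduleConfig()             # no explicit batch size
if not unet.batch_size_is_specified:    # pipeline may double it for classifier-free guidance
    unet.batch_size = 2
print(unet.batch_size)                  # 2

pinned = ToySubmoduleConfig(batch_size=4)
if not pinned.batch_size_is_specified:  # never taken: the explicit value is respected
    pinned.batch_size = 8
print(pinned.batch_size)                # 4
```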
src/optimum/rbln/diffusers/configurations/pipelines/configuration_controlnet.py

```diff
@@ -16,13 +16,9 @@ from typing import Optional, Tuple
 
 from ....configuration_utils import RBLNModelConfig
 from ....transformers import RBLNCLIPTextModelConfig, RBLNCLIPTextModelWithProjectionConfig
-from ....utils.logging import get_logger
 from ..models import RBLNAutoencoderKLConfig, RBLNControlNetModelConfig, RBLNUNet2DConditionModelConfig
 
 
-logger = get_logger(__name__)
-
-
 class _RBLNStableDiffusionControlNetPipelineBaseConfig(RBLNModelConfig):
     submodules = ["text_encoder", "unet", "vae", "controlnet"]
     _vae_uses_encoder = False
@@ -58,7 +54,7 @@ class _RBLNStableDiffusionControlNetPipelineBaseConfig(RBLNModelConfig):
             sample_size (Optional[Tuple[int, int]]): Spatial dimensions for the UNet model.
             image_size (Optional[Tuple[int, int]]): Alternative way to specify image dimensions.
                 Cannot be used together with img_height/img_width.
-            guidance_scale (Optional[float]): Scale for classifier-free guidance.
+            guidance_scale (Optional[float]): Scale for classifier-free guidance.
             **kwargs: Additional arguments passed to the parent RBLNModelConfig.
 
         Raises:
@@ -79,7 +75,6 @@ class _RBLNStableDiffusionControlNetPipelineBaseConfig(RBLNModelConfig):
         self.unet = self.init_submodule_config(
             RBLNUNet2DConditionModelConfig,
             unet,
-            batch_size=batch_size,
             sample_size=sample_size,
         )
         self.vae = self.init_submodule_config(
@@ -89,14 +84,24 @@ class _RBLNStableDiffusionControlNetPipelineBaseConfig(RBLNModelConfig):
             uses_encoder=self.__class__._vae_uses_encoder,
             sample_size=image_size,  # image size is equal to sample size in vae
         )
-        self.controlnet = self.init_submodule_config(RBLNControlNetModelConfig, controlnet
+        self.controlnet = self.init_submodule_config(RBLNControlNetModelConfig, controlnet)
+
+        # Get default guidance scale from original class to set UNet and ControlNet batch size
+        if guidance_scale is None:
+            guidance_scale = self.get_default_values_for_original_cls("__call__", ["guidance_scale"])["guidance_scale"]
 
         if guidance_scale is not None:
-            logger.warning("Specifying `guidance_scale` is deprecated. It will be removed in a future version.")
             do_classifier_free_guidance = guidance_scale > 1.0
             if do_classifier_free_guidance:
-
-
+                if not self.unet.batch_size_is_specified:
+                    self.unet.batch_size = self.text_encoder.batch_size * 2
+                if not self.controlnet.batch_size_is_specified:
+                    self.controlnet.batch_size = self.text_encoder.batch_size * 2
+            else:
+                if not self.unet.batch_size_is_specified:
+                    self.unet.batch_size = self.text_encoder.batch_size
+                if not self.controlnet.batch_size_is_specified:
+                    self.controlnet.batch_size = self.text_encoder.batch_size
 
     @property
     def batch_size(self):
@@ -157,7 +162,7 @@ class _RBLNStableDiffusionXLControlNetPipelineBaseConfig(RBLNModelConfig):
             sample_size (Optional[Tuple[int, int]]): Spatial dimensions for the UNet model.
             image_size (Optional[Tuple[int, int]]): Alternative way to specify image dimensions.
                 Cannot be used together with img_height/img_width.
-            guidance_scale (Optional[float]): Scale for classifier-free guidance.
+            guidance_scale (Optional[float]): Scale for classifier-free guidance.
             **kwargs: Additional arguments passed to the parent RBLNModelConfig.
 
         Raises:
@@ -181,7 +186,6 @@ class _RBLNStableDiffusionXLControlNetPipelineBaseConfig(RBLNModelConfig):
         self.unet = self.init_submodule_config(
             RBLNUNet2DConditionModelConfig,
             unet,
-            batch_size=batch_size,
             sample_size=sample_size,
         )
         self.vae = self.init_submodule_config(
@@ -191,14 +195,25 @@ class _RBLNStableDiffusionXLControlNetPipelineBaseConfig(RBLNModelConfig):
             uses_encoder=self.__class__._vae_uses_encoder,
             sample_size=image_size,  # image size is equal to sample size in vae
         )
-        self.controlnet = self.init_submodule_config(RBLNControlNetModelConfig, controlnet
+        self.controlnet = self.init_submodule_config(RBLNControlNetModelConfig, controlnet)
 
-
-
-
-
+        # Get default guidance scale from original class to set UNet and ControlNet batch size
+        guidance_scale = (
+            guidance_scale
+            or self.get_default_values_for_original_cls("__call__", ["guidance_scale"])["guidance_scale"]
+        )
+
+        do_classifier_free_guidance = guidance_scale > 1.0
+        if do_classifier_free_guidance:
+            if not self.unet.batch_size_is_specified:
                 self.unet.batch_size = self.text_encoder.batch_size * 2
+            if not self.controlnet.batch_size_is_specified:
                 self.controlnet.batch_size = self.text_encoder.batch_size * 2
+        else:
+            if not self.unet.batch_size_is_specified:
+                self.unet.batch_size = self.text_encoder.batch_size
+            if not self.controlnet.batch_size_is_specified:
+                self.controlnet.batch_size = self.text_encoder.batch_size
 
     @property
     def batch_size(self):
```
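With the deprecation warning removed, these pipeline configs now fall back to the default `guidance_scale` declared on the original pipeline's `__call__` when the caller does not pass one, using the new `get_default_values_for_original_cls`, and they double the UNet/ControlNet batch size only when classifier-free guidance would actually run and the submodule's batch size was not pinned. The sketch below is a self-contained illustration of the signature-inspection step against a hypothetical `__call__` (not the actual diffusers pipeline):

```python
import inspect


def toy_call(self, prompt, guidance_scale: float = 7.5, num_inference_steps: int = 50):
    """Stand-in for a diffusers pipeline __call__; only its signature matters here."""


# Mirrors get_default_values_for_original_cls("__call__", ["guidance_scale"]):
# look up the declared default for each requested parameter name.
signature = inspect.signature(toy_call)
defaults = {name: signature.parameters[name].default for name in ["guidance_scale"]}
print(defaults)  # {'guidance_scale': 7.5}

# Classifier-free guidance evaluates the conditional and unconditional prompts together,
# so the denoiser sees twice the text-encoder batch when the scale is above 1.0.
text_encoder_batch = 1
unet_batch = text_encoder_batch * 2 if defaults["guidance_scale"] > 1.0 else text_encoder_batch
print(unet_batch)  # 2
```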
src/optimum/rbln/diffusers/configurations/pipelines/configuration_kandinsky2_2.py

```diff
@@ -16,14 +16,10 @@ from typing import Optional, Tuple
 
 from ....configuration_utils import RBLNModelConfig
 from ....transformers import RBLNCLIPTextModelWithProjectionConfig, RBLNCLIPVisionModelWithProjectionConfig
-from ....utils.logging import get_logger
 from ..models import RBLNUNet2DConditionModelConfig, RBLNVQModelConfig
 from ..models.configuration_prior_transformer import RBLNPriorTransformerConfig
 
 
-logger = get_logger(__name__)
-
-
 class _RBLNKandinskyV22PipelineBaseConfig(RBLNModelConfig):
     submodules = ["unet", "movq"]
     _movq_uses_encoder = False
@@ -49,7 +45,7 @@ class _RBLNKandinskyV22PipelineBaseConfig(RBLNModelConfig):
                 Initialized as RBLNVQModelConfig if not provided.
             sample_size (Optional[Tuple[int, int]]): Spatial dimensions for the UNet model.
             batch_size (Optional[int]): Batch size for inference, applied to all submodules.
-            guidance_scale (Optional[float]): Scale for classifier-free guidance.
+            guidance_scale (Optional[float]): Scale for classifier-free guidance.
             image_size (Optional[Tuple[int, int]]): Dimensions for the generated images.
                 Cannot be used together with img_height/img_width.
             img_height (Optional[int]): Height of the generated images.
@@ -70,9 +66,7 @@ class _RBLNKandinskyV22PipelineBaseConfig(RBLNModelConfig):
         if img_height is not None and img_width is not None:
             image_size = (img_height, img_width)
 
-        self.unet = self.init_submodule_config(
-            RBLNUNet2DConditionModelConfig, unet, batch_size=batch_size, sample_size=sample_size
-        )
+        self.unet = self.init_submodule_config(RBLNUNet2DConditionModelConfig, unet, sample_size=sample_size)
         self.movq = self.init_submodule_config(
             RBLNVQModelConfig,
             movq,
@@ -81,11 +75,16 @@ class _RBLNKandinskyV22PipelineBaseConfig(RBLNModelConfig):
             uses_encoder=self._movq_uses_encoder,
         )
 
-
-
+        # Get default guidance scale from original class to set UNet batch size
+        if guidance_scale is None:
+            guidance_scale = self.get_default_values_for_original_cls("__call__", ["guidance_scale"])["guidance_scale"]
+
+        if not self.unet.batch_size_is_specified:
             do_classifier_free_guidance = guidance_scale > 1.0
             if do_classifier_free_guidance:
                 self.unet.batch_size = self.movq.batch_size * 2
+            else:
+                self.unet.batch_size = self.movq.batch_size
 
     @property
     def batch_size(self):
@@ -136,7 +135,7 @@ class RBLNKandinskyV22PriorPipelineConfig(RBLNModelConfig):
             prior (Optional[RBLNPriorTransformerConfig]): Configuration for the prior transformer component.
                 Initialized as RBLNPriorTransformerConfig if not provided.
             batch_size (Optional[int]): Batch size for inference, applied to all submodules.
-            guidance_scale (Optional[float]): Scale for classifier-free guidance.
+            guidance_scale (Optional[float]): Scale for classifier-free guidance.
             **kwargs: Additional arguments passed to the parent RBLNModelConfig.
 
         Note:
@@ -151,13 +150,18 @@ class RBLNKandinskyV22PriorPipelineConfig(RBLNModelConfig):
             RBLNCLIPVisionModelWithProjectionConfig, image_encoder, batch_size=batch_size
         )
 
-        self.prior = self.init_submodule_config(RBLNPriorTransformerConfig, prior
+        self.prior = self.init_submodule_config(RBLNPriorTransformerConfig, prior)
+
+        # Get default guidance scale from original class to set UNet batch size
+        if guidance_scale is None:
+            guidance_scale = self.get_default_values_for_original_cls("__call__", ["guidance_scale"])["guidance_scale"]
 
-        if
-            logger.warning("Specifying `guidance_scale` is deprecated. It will be removed in a future version.")
+        if not self.prior.batch_size_is_specified:
             do_classifier_free_guidance = guidance_scale > 1.0
             if do_classifier_free_guidance:
                 self.prior.batch_size = self.text_encoder.batch_size * 2
+            else:
+                self.prior.batch_size = self.text_encoder.batch_size
 
     @property
     def batch_size(self):
@@ -208,7 +212,7 @@ class _RBLNKandinskyV22CombinedPipelineBaseConfig(RBLNModelConfig):
             batch_size (Optional[int]): Batch size for inference, applied to all submodules.
             img_height (Optional[int]): Height of the generated images.
             img_width (Optional[int]): Width of the generated images.
-            guidance_scale (Optional[float]): Scale for classifier-free guidance.
+            guidance_scale (Optional[float]): Scale for classifier-free guidance.
             prior_prior (Optional[RBLNPriorTransformerConfig]): Direct configuration for the prior transformer.
                 Used if prior_pipe is not provided.
             prior_image_encoder (Optional[RBLNCLIPVisionModelWithProjectionConfig]): Direct configuration for the image encoder.
```
src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion.py

```diff
@@ -16,13 +16,9 @@ from typing import Optional, Tuple
 
 from ....configuration_utils import RBLNModelConfig
 from ....transformers import RBLNCLIPTextModelConfig
-from ....utils.logging import get_logger
 from ..models import RBLNAutoencoderKLConfig, RBLNUNet2DConditionModelConfig
 
 
-logger = get_logger(__name__)
-
-
 class _RBLNStableDiffusionPipelineBaseConfig(RBLNModelConfig):
     submodules = ["text_encoder", "unet", "vae"]
     _vae_uses_encoder = False
@@ -55,7 +51,7 @@ class _RBLNStableDiffusionPipelineBaseConfig(RBLNModelConfig):
             sample_size (Optional[Tuple[int, int]]): Spatial dimensions for the UNet model.
             image_size (Optional[Tuple[int, int]]): Alternative way to specify image dimensions.
                 Cannot be used together with img_height/img_width.
-            guidance_scale (Optional[float]): Scale for classifier-free guidance.
+            guidance_scale (Optional[float]): Scale for classifier-free guidance.
             **kwargs: Additional arguments passed to the parent RBLNModelConfig.
 
         Raises:
@@ -76,7 +72,6 @@ class _RBLNStableDiffusionPipelineBaseConfig(RBLNModelConfig):
         self.unet = self.init_submodule_config(
             RBLNUNet2DConditionModelConfig,
             unet,
-            batch_size=batch_size,
             sample_size=sample_size,
         )
         self.vae = self.init_submodule_config(
@@ -87,11 +82,16 @@ class _RBLNStableDiffusionPipelineBaseConfig(RBLNModelConfig):
             sample_size=image_size,  # image size is equal to sample size in vae
         )
 
-
-
+        # Get default guidance scale from original class to set UNet batch size
+        if guidance_scale is None:
+            guidance_scale = self.get_default_values_for_original_cls("__call__", ["guidance_scale"])["guidance_scale"]
+
+        if not self.unet.batch_size_is_specified:
             do_classifier_free_guidance = guidance_scale > 1.0
             if do_classifier_free_guidance:
                 self.unet.batch_size = self.text_encoder.batch_size * 2
+            else:
+                self.unet.batch_size = self.text_encoder.batch_size
 
     @property
     def batch_size(self):
```
src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_3.py

```diff
@@ -16,13 +16,9 @@ from typing import Optional, Tuple
 
 from ....configuration_utils import RBLNModelConfig
 from ....transformers import RBLNCLIPTextModelWithProjectionConfig, RBLNT5EncoderModelConfig
-from ....utils.logging import get_logger
 from ..models import RBLNAutoencoderKLConfig, RBLNSD3Transformer2DModelConfig
 
 
-logger = get_logger(__name__)
-
-
 class _RBLNStableDiffusion3PipelineBaseConfig(RBLNModelConfig):
     submodules = ["transformer", "text_encoder", "text_encoder_2", "text_encoder_3", "vae"]
     _vae_uses_encoder = False
@@ -63,7 +59,7 @@ class _RBLNStableDiffusion3PipelineBaseConfig(RBLNModelConfig):
             batch_size (Optional[int]): Batch size for inference, applied to all submodules.
             img_height (Optional[int]): Height of the generated images.
             img_width (Optional[int]): Width of the generated images.
-            guidance_scale (Optional[float]): Scale for classifier-free guidance.
+            guidance_scale (Optional[float]): Scale for classifier-free guidance.
             **kwargs: Additional arguments passed to the parent RBLNModelConfig.
 
         Raises:
@@ -97,7 +93,6 @@ class _RBLNStableDiffusion3PipelineBaseConfig(RBLNModelConfig):
         self.transformer = self.init_submodule_config(
             RBLNSD3Transformer2DModelConfig,
             transformer,
-            batch_size=batch_size,
             sample_size=sample_size,
         )
         self.vae = self.init_submodule_config(
@@ -108,11 +103,16 @@ class _RBLNStableDiffusion3PipelineBaseConfig(RBLNModelConfig):
             sample_size=image_size,
         )
 
-
-
+        # Get default guidance scale from original class to set Transformer batch size
+        if guidance_scale is None:
+            guidance_scale = self.get_default_values_for_original_cls("__call__", ["guidance_scale"])["guidance_scale"]
+
+        if not self.transformer.batch_size_is_specified:
             do_classifier_free_guidance = guidance_scale > 1.0
             if do_classifier_free_guidance:
                 self.transformer.batch_size = self.text_encoder.batch_size * 2
+            else:
+                self.transformer.batch_size = self.text_encoder.batch_size
 
     @property
     def max_seq_len(self):
```
src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_xl.py

```diff
@@ -16,13 +16,9 @@ from typing import Optional, Tuple
 
 from ....configuration_utils import RBLNModelConfig
 from ....transformers import RBLNCLIPTextModelConfig, RBLNCLIPTextModelWithProjectionConfig
-from ....utils.logging import get_logger
 from ..models import RBLNAutoencoderKLConfig, RBLNUNet2DConditionModelConfig
 
 
-logger = get_logger(__name__)
-
-
 class _RBLNStableDiffusionXLPipelineBaseConfig(RBLNModelConfig):
     submodules = ["text_encoder", "text_encoder_2", "unet", "vae"]
     _vae_uses_encoder = False
@@ -58,7 +54,7 @@ class _RBLNStableDiffusionXLPipelineBaseConfig(RBLNModelConfig):
             sample_size (Optional[Tuple[int, int]]): Spatial dimensions for the UNet model.
             image_size (Optional[Tuple[int, int]]): Alternative way to specify image dimensions.
                 Cannot be used together with img_height/img_width.
-            guidance_scale (Optional[float]): Scale for classifier-free guidance.
+            guidance_scale (Optional[float]): Scale for classifier-free guidance.
             **kwargs: Additional arguments passed to the parent RBLNModelConfig.
 
         Raises:
@@ -82,7 +78,6 @@ class _RBLNStableDiffusionXLPipelineBaseConfig(RBLNModelConfig):
         self.unet = self.init_submodule_config(
             RBLNUNet2DConditionModelConfig,
             unet,
-            batch_size=batch_size,
             sample_size=sample_size,
         )
         self.vae = self.init_submodule_config(
@@ -93,11 +88,16 @@ class _RBLNStableDiffusionXLPipelineBaseConfig(RBLNModelConfig):
             sample_size=image_size,  # image size is equal to sample size in vae
         )
 
-
-
+        # Get default guidance scale from original class to set UNet batch size
+        if guidance_scale is None:
+            guidance_scale = self.get_default_values_for_original_cls("__call__", ["guidance_scale"])["guidance_scale"]
+
+        if not self.unet.batch_size_is_specified:
             do_classifier_free_guidance = guidance_scale > 1.0
             if do_classifier_free_guidance:
                 self.unet.batch_size = self.text_encoder.batch_size * 2
+            else:
+                self.unet.batch_size = self.text_encoder.batch_size
 
     @property
     def batch_size(self):
```