optimum-rbln 0.9.1__tar.gz → 0.9.2a1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of optimum-rbln might be problematic.
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/PKG-INFO +2 -2
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/pyproject.toml +2 -1
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/__version__.py +2 -2
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/configuration_utils.py +54 -7
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_controlnet.py +30 -14
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_cosmos.py +11 -8
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_kandinsky2_2.py +23 -13
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion.py +10 -6
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_3.py +14 -10
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_xl.py +14 -7
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/cosmos/configuration_cosmos_guardrail.py +9 -11
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/blip_2/configuration_blip_2.py +35 -3
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/blip_2/modeling_blip_2.py +21 -22
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/clip/modeling_clip.py +4 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/colpali/colpali_architecture.py +2 -2
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/colpali/configuration_colpali.py +17 -1
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/colpali/modeling_colpali.py +72 -79
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +2 -2
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gemma3/configuration_gemma3.py +11 -3
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gemma3/modeling_gemma3.py +58 -43
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/idefics3/configuration_idefics3.py +27 -3
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/idefics3/modeling_idefics3.py +22 -15
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/llava/configuration_llava.py +16 -2
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/llava/modeling_llava.py +106 -49
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/llava_next/configuration_llava_next.py +11 -13
- optimum_rbln-0.9.2a1/src/optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +494 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/pixtral/modeling_pixtral.py +6 -11
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +11 -1
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2_5_vl/qwen2_5_vl_architecture.py +22 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2_vl/modeling_qwen2_vl.py +11 -1
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2_vl/qwen2_vl_architecture.py +22 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/siglip/modeling_siglip.py +3 -14
- optimum_rbln-0.9.2a1/src/optimum/rbln/transformers/utils/rbln_runtime_wrapper.py +79 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/utils/submodule.py +21 -5
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/tests/test_llm.py +25 -2
- optimum_rbln-0.9.2a1/uv.lock +2972 -0
- optimum_rbln-0.9.1/src/optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +0 -604
- optimum_rbln-0.9.1/uv.lock +0 -2580
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/ISSUE_TEMPLATE/config.yml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/ISSUE_TEMPLATE/model_request.md +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/pull_request_template.md +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/renovate.json +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/scripts/auto_code_review.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/scripts/validate_docstrings.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/scripts/validate_pr_checklist.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/version.yaml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/auto_code_review.yml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/auto_dependency_bot.yml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/check_code_quality.yml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/deploy-on-tag.yaml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/deploy.yaml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/pr-title-check.yaml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/pr_checklist_validator.yml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/rbln_check_compiler.yaml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/rbln_dispatch_pytest.yaml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/rbln_optimum_inference_test.yaml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/rbln_optimum_pytest.yaml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/rbln_scheduled_test.yaml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/rbln_trigger_on_pr.yaml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.github/workflows/test-docstrings.yml +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/.gitignore +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/CODE_OF_CONDUCT.md +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/CONTRIBUTING.md +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/LICENSE +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/README.md +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/assets/rbln_logo.png +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/advanced/custom_class.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/audio-classification/run_ast_audio_classification.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/depth-estimation/run_dpt.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/image-classification/run_image_classification.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/image-classification/run_vit_image_classification.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/image-to-text/run_idefics3.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/image-to-text/run_llava_next_image_to_text.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/kandinsky2_2/run_kandinsky2_2.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/kandinsky2_2/run_kandinsky2_2_combined.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/kandinsky2_2/run_kandinsky2_2_img2img.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/kandinsky2_2/run_kandinsky2_2_img2img_combined.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/kandinsky2_2/run_kandinsky2_2_inpaint.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/kandinsky2_2/run_kandinsky2_2_inpaint_combined.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/kandinsky2_2/run_kandinsky2_2_prior_interpolate.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/question-answering/run_question_answering.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/speech-recognition/run_wav2vec2.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/speech-recognition/run_whisper.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/stable-diffusion/run_stable_diffusion.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/stable-diffusion/run_stable_diffusion_controlnet.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/stable-diffusion/run_stable_diffusion_img2img.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/stable-diffusion/run_stable_diffusion_img2img_controlnet.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/stable-diffusion/run_stable_diffusion_inpaint.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/stable-diffusion/run_stable_diffusion_lora.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/stable-diffusion/run_stable_diffusion_multicontrolnet.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/text-classification/run_bge_m3_text_classification.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/text-classification/run_bge_reranker_v2_m3_text_classification.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/text-classification/run_secureBERT.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/text-classification/run_t5_classification.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/text-classification/run_twitter_roberta_text_classification.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/text2text-generation/run_bart_text2text_generation.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/text2text-generation/run_llama_peft.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/text2text-generation/run_llama_text2text_generation.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/examples/time-series-forecasting/run_time_series_forecasting.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/scripts/uv-lock.sh +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/scripts/uv-sync.sh +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/models/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl_cosmos.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/models/configuration_controlnet.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/models/configuration_prior_transformer.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/models/configuration_transformer_cosmos.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/models/configuration_transformer_sd3.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/models/configuration_unet_2d_condition.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/models/configuration_vq_model.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/modeling_diffusers.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/autoencoders/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/autoencoders/autoencoder_kl.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/autoencoders/autoencoder_kl_cosmos.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/autoencoders/vae.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/autoencoders/vq_model.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/controlnet.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/transformers/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/transformers/prior_transformer.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/transformers/transformer_cosmos.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/transformers/transformer_sd3.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/unets/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/models/unets/unet_2d_condition.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/auto_pipeline.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/controlnet/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/cosmos/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/cosmos/cosmos_guardrail.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/cosmos/pipeline_cosmos_text2world.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/cosmos/pipeline_cosmos_video2world.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/modeling.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/modeling_base.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/ops/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/ops/attn.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/ops/flash_attn.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/ops/kv_cache_update.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/ops/linear.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/ops/sliding_window_attn.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/configuration_generic.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/modeling_attention_utils.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/modeling_generic.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/modeling_outputs.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/modeling_rope_utils.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/audio_spectrogram_transformer/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/auto/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/auto/auto_factory.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/auto/modeling_auto.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/bart/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/bart/bart_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/bart/configuration_bart.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/bart/modeling_bart.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/bert/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/bert/bert_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/bert/configuration_bert.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/bert/modeling_bert.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/blip_2/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/clip/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/clip/configuration_clip.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/colpali/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/decoderonly/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/decoderonly/configuration_decoderonly.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/decoderonly/decoderonly_runtime_utils.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/decoderonly/generation_decoderonly.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/depth_anything/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/depth_anything/configuration_depth_anything.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/depth_anything/modeling_depth_anything.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/distilbert/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/distilbert/configuration_distilbert.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/distilbert/modeling_distilbert.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/dpt/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/dpt/configuration_dpt.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/dpt/modeling_dpt.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/exaone/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/exaone/configuration_exaone.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/exaone/exaone_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/exaone/modeling_exaone.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gemma/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gemma/configuration_gemma.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gemma/gemma_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gemma/modeling_gemma.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gemma3/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gemma3/gemma3_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gemma3/gemma3_runtime_utils.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gpt2/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gpt2/configuration_gpt2.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/gpt2/modeling_gpt2.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/grounding_dino/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/grounding_dino/configuration_grounding_dino.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/grounding_dino/grounding_dino_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/grounding_dino/modeling_grounding_dino.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/idefics3/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/llama/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/llama/configuration_llama.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/llama/llama_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/llama/modeling_llama.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/llava/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/llava_next/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/midm/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/midm/configuration_midm.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/midm/midm_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/midm/modeling_midm.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/mistral/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/mistral/configuration_mistral.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/mistral/mistral_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/mistral/modeling_mistral.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/opt/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/opt/configuration_opt.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/opt/modeling_opt.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/opt/opt_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/pegasus/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/pegasus/configuration_pegasus.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/pegasus/modeling_pegasus.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/pegasus/pegasus_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/phi/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/phi/configuration_phi.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/phi/modeling_phi.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/phi/phi_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/pixtral/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/pixtral/configuration_pixtral.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/pixtral/pixtral_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2/configuration_qwen2.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2/qwen2_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2_5_vl/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2_vl/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen2_vl/configuration_qwen2_vl.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen3/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen3/configuration_qwen3.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen3/modeling_qwen3.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/qwen3/qwen3_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/resnet/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/resnet/configuration_resnet.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/resnet/modeling_resnet.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/roberta/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/roberta/configuration_roberta.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/roberta/modeling_roberta.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/seq2seq/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/seq2seq/configuration_seq2seq.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/seq2seq/modeling_seq2seq.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/siglip/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/siglip/configuration_siglip.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/swin/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/swin/configuration_swin.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/swin/modeling_swin.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/t5/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/t5/configuration_t5.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/t5/modeling_t5.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/t5/t5_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/time_series_transformer/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/time_series_transformer/configuration_time_series_transformer.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/time_series_transformer/modeling_time_series_transformer.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/time_series_transformer/time_series_transformers_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/vit/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/vit/configuration_vit.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/vit/modeling_vit.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/wav2vec2/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/wav2vec2/configuration_wav2vec2.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/whisper/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/whisper/configuration_whisper.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/whisper/generation_whisper.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/whisper/modeling_whisper.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/whisper/whisper_architecture.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/xlm_roberta/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/xlm_roberta/configuration_xlm_roberta.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/utils/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/utils/rbln_quantization.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/utils/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/utils/decorator_utils.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/utils/depreacate_utils.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/utils/hub.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/utils/import_utils.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/utils/logging.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/utils/model_utils.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/utils/runtime_utils.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/utils/save_utils.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/tests/__init__.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/tests/psnr.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/tests/requirements_sdxl.txt +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/tests/run_stable_diffusion_xl_base.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/tests/test_base.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/tests/test_config.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/tests/test_diffusers.py +0 -0
- {optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/tests/test_transformers.py +0 -0
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: optimum-rbln
-Version: 0.9.1
+Version: 0.9.2a1
 Summary: Optimum RBLN is the interface between the HuggingFace Transformers and Diffusers libraries and RBLN accelerators. It provides a set of tools enabling easy model loading and inference on single and multiple rbln device settings for different downstream tasks.
 Project-URL: Homepage, https://rebellions.ai
 Project-URL: Documentation, https://docs.rbln.ai
@@ -29,7 +29,7 @@ Requires-Dist: packaging>=24.1
 Requires-Dist: torch==2.7.0
 Requires-Dist: torchaudio<=2.7.0
 Requires-Dist: torchvision<=0.22.0
-Requires-Dist: transformers==4.
+Requires-Dist: transformers==4.53.1
 Description-Content-Type: text/markdown
 
 
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/pyproject.toml

@@ -33,7 +33,7 @@ dependencies = [
     "torchaudio<=2.7.0",
     "torchvision<=0.22.0",
     "accelerate>=1.0.1",
-    "transformers==4.
+    "transformers==4.53.1",
     "diffusers==0.35.1",
     "packaging>=24.1",
 ]
@@ -57,6 +57,7 @@ tests = [
     "soundfile>=0.13.1",
     "librosa>=0.11.0",
     "simphile>=1.0.2",
+    "torchcodec>=0.5.0"
 ]
 quality = [
     "ruff>=0.3.3",
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/__version__.py

@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.9.1'
-__version_tuple__ = version_tuple = (0, 9, 1)
+__version__ = version = '0.9.2a1'
+__version_tuple__ = version_tuple = (0, 9, 2, 'a1')
 
 __commit_id__ = commit_id = None
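For readers not used to PEP 440 pre-release tags: the `a1` suffix makes `0.9.2a1` an alpha that sorts after the previous release but before the final `0.9.2`. A quick check with the `packaging` library (already a declared dependency above); this snippet is illustrative and not part of the package:

```python
from packaging.version import Version

# 0.9.2a1 is a pre-release of 0.9.2: newer than 0.9.1, older than the final 0.9.2.
assert Version("0.9.1") < Version("0.9.2a1") < Version("0.9.2")
```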
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/configuration_utils.py

@@ -534,18 +534,18 @@ class RBLNModelConfig(RBLNSerializableConfigProtocol):
     submodules: List[str] = []
     subclass_non_save_attributes = []
 
-    def init_submodule_config(
+    def initialize_submodule_config(
         self,
-        submodule_config_cls: Type["RBLNModelConfig"],
         submodule_config: Optional[Union[Dict[str, Any], "RBLNModelConfig"]] = None,
+        force_kwargs: bool = False,
         **kwargs: Any,
     ) -> "RBLNModelConfig":
-        # Initialize a submodule config from a dict or a RBLNModelConfig.
-        # kwargs is specified from the predecessor config.
-
         if submodule_config is None:
             submodule_config = {}
 
+        if isinstance(submodule_config, RBLNModelConfig):
+            return submodule_config
+
         if isinstance(submodule_config, dict):
             from_predecessor = self._runtime_options.copy()
             from_predecessor.update(
@@ -559,13 +559,60 @@ class RBLNModelConfig(RBLNSerializableConfigProtocol):
 
             init_kwargs = from_predecessor
             init_kwargs.update(submodule_config)
-            submodule_config = submodule_config_cls(**init_kwargs)
 
-        if not isinstance(submodule_config, RBLNModelConfig):
+        if force_kwargs:
+            for key, value in kwargs.items():
+                if key in init_kwargs:
+                    if init_kwargs[key] != value:
+                        raise ValueError(
+                            f"Parameter conflict for '{key}': submodule_config has {init_kwargs[key]}, "
+                            f"but kwargs has {value}. Using kwargs value: {value}"
+                        )
+                init_kwargs[key] = value
+
+        if "cls_name" in init_kwargs:
+            config_cls = get_rbln_config_class(init_kwargs["cls_name"])
+        else:
+            return init_kwargs
+
+        submodule_config = config_cls(**init_kwargs)
+
+        if not isinstance(submodule_config, RBLNModelConfig):
             raise TypeError(f"Invalid submodule config type: {type(submodule_config)}")
 
         return submodule_config
 
+    def filter_parameters(self, config_cls: Type["RBLNModelConfig"], parameters: Dict[str, Any]) -> Dict[str, Any]:
+        import importlib
+
+        model_cls_name = config_cls.__name__.replace("Config", "")
+        modeling_module_name = config_cls.__module__.replace("configuration_", "modeling_")
+
+        model_cls = None
+        try:
+            modeling_module = importlib.import_module(modeling_module_name)
+            if hasattr(modeling_module, model_cls_name):
+                model_cls = getattr(modeling_module, model_cls_name)
+        except ImportError:
+            logger.debug(f"Could not import modeling module: {modeling_module_name}")
+
+        filtered_out_params = set()
+
+        if model_cls is not None:
+            if not getattr(model_cls, "_tp_support", False):
+                filtered_out_params.add("tensor_parallel_size")
+
+        filtered_params = {}
+        for key, value in parameters.items():
+            if key in filtered_out_params:
+                logger.debug(
+                    f"Parameter '{key}' filtered out for {config_cls.__name__} (not supported by model flags)."
+                )
+            else:
+                filtered_params[key] = value
+
+        return filtered_params
+
     def __setattr__(self, key, value):
         if (
             key != "_attributes_map"
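The net effect of the two hunks above: submodule configs are no longer built by passing the config class object positionally. The class is resolved from a `cls_name` string via `get_rbln_config_class`, an already-constructed `RBLNModelConfig` is passed through unchanged, and a dict without a `cls_name` comes back as plain kwargs. A minimal sketch of the new call pattern, assuming only what the hunks show (the pipeline class and argument values here are illustrative, not taken from the package):

```python
# Hedged sketch of the new call pattern; illustrative, not a file from the package.
# The import path follows src/optimum/rbln/configuration_utils.py in the file list above.
from optimum.rbln.configuration_utils import RBLNModelConfig


class RBLNToyPipelineConfig(RBLNModelConfig):  # hypothetical pipeline config
    submodules = ["text_encoder", "vae"]

    def __init__(self, text_encoder=None, vae=None, batch_size=None, **kwargs):
        super().__init__(**kwargs)
        batch_size = batch_size or 1

        # A dict (or None) is resolved through get_rbln_config_class(cls_name);
        # an RBLNModelConfig instance passed by the caller is returned unchanged.
        self.text_encoder = self.initialize_submodule_config(
            text_encoder,
            cls_name="RBLNCLIPTextModelConfig",
            batch_size=batch_size,
        )
        self.vae = self.initialize_submodule_config(
            vae,
            cls_name="RBLNAutoencoderKLConfig",
            batch_size=batch_size,
        )
```

With `force_kwargs=True`, keyword arguments supplied by the parent config take precedence over the submodule dict, and a differing value raises the `ValueError` added above.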
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_controlnet.py

@@ -93,20 +93,27 @@ class RBLNStableDiffusionControlNetPipelineBaseConfig(RBLNModelConfig):
         elif (img_height is not None and img_width is None) or (img_height is None and img_width is not None):
             raise ValueError("Both img_height and img_width must be provided together if used")
 
-        self.text_encoder = self.init_submodule_config(RBLNCLIPTextModelConfig, text_encoder, batch_size=batch_size)
-        self.unet = self.init_submodule_config(
-            RBLNUNet2DConditionModelConfig,
+        self.text_encoder = self.initialize_submodule_config(
+            text_encoder,
+            cls_name="RBLNCLIPTextModelConfig",
+            batch_size=batch_size,
+        )
+        self.unet = self.initialize_submodule_config(
             unet,
+            cls_name="RBLNUNet2DConditionModelConfig",
             sample_size=sample_size,
         )
-        self.vae = self.init_submodule_config(
-            RBLNAutoencoderKLConfig,
+        self.vae = self.initialize_submodule_config(
             vae,
+            cls_name="RBLNAutoencoderKLConfig",
             batch_size=batch_size,
             uses_encoder=self.__class__._vae_uses_encoder,
             sample_size=image_size,  # image size is equal to sample size in vae
         )
-        self.controlnet = self.init_submodule_config(RBLNControlNetModelConfig, controlnet)
+        self.controlnet = self.initialize_submodule_config(
+            controlnet,
+            cls_name="RBLNControlNetModelConfig",
+        )
 
         # Get default guidance scale from original class to set UNet and ControlNet batch size
         if guidance_scale is None:
@@ -235,23 +242,32 @@ class RBLNStableDiffusionXLControlNetPipelineBaseConfig(RBLNModelConfig):
         elif (img_height is not None and img_width is None) or (img_height is None and img_width is not None):
             raise ValueError("Both img_height and img_width must be provided together if used")
 
-        self.text_encoder = self.init_submodule_config(RBLNCLIPTextModelConfig, text_encoder, batch_size=batch_size)
-        self.text_encoder_2 = self.init_submodule_config(
-            RBLNCLIPTextModelWithProjectionConfig, text_encoder_2, batch_size=batch_size
+        self.text_encoder = self.initialize_submodule_config(
+            text_encoder,
+            cls_name="RBLNCLIPTextModelConfig",
+            batch_size=batch_size,
         )
-        self.unet = self.init_submodule_config(
-            RBLNUNet2DConditionModelConfig,
+        self.text_encoder_2 = self.initialize_submodule_config(
+            text_encoder_2,
+            cls_name="RBLNCLIPTextModelWithProjectionConfig",
+            batch_size=batch_size,
+        )
+        self.unet = self.initialize_submodule_config(
             unet,
+            cls_name="RBLNUNet2DConditionModelConfig",
             sample_size=sample_size,
         )
-        self.vae = self.init_submodule_config(
-            RBLNAutoencoderKLConfig,
+        self.vae = self.initialize_submodule_config(
             vae,
+            cls_name="RBLNAutoencoderKLConfig",
             batch_size=batch_size,
             uses_encoder=self.__class__._vae_uses_encoder,
             sample_size=image_size,  # image size is equal to sample size in vae
         )
-        self.controlnet = self.init_submodule_config(RBLNControlNetModelConfig, controlnet)
+        self.controlnet = self.initialize_submodule_config(
+            controlnet,
+            cls_name="RBLNControlNetModelConfig",
+        )
 
         # Get default guidance scale from original class to set UNet and ControlNet batch size
         guidance_scale = (
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_cosmos.py

@@ -63,12 +63,15 @@ class RBLNCosmosPipelineBaseConfig(RBLNModelConfig):
         """
         super().__init__(**kwargs)
 
-        self.text_encoder = self.init_submodule_config(
-            RBLNT5EncoderModelConfig, text_encoder, batch_size=batch_size, max_seq_len=max_seq_len
+        self.text_encoder = self.initialize_submodule_config(
+            text_encoder,
+            cls_name="RBLNT5EncoderModelConfig",
+            batch_size=batch_size,
+            max_seq_len=max_seq_len,
         )
-        self.transformer = self.init_submodule_config(
-            RBLNCosmosTransformer3DModelConfig,
+        self.transformer = self.initialize_submodule_config(
             transformer,
+            cls_name="RBLNCosmosTransformer3DModelConfig",
             batch_size=batch_size,
             max_seq_len=max_seq_len,
             height=height,
@@ -76,18 +79,18 @@ class RBLNCosmosPipelineBaseConfig(RBLNModelConfig):
             num_frames=num_frames,
             fps=fps,
         )
-        self.vae = self.init_submodule_config(
-            RBLNAutoencoderKLCosmosConfig,
+        self.vae = self.initialize_submodule_config(
             vae,
+            cls_name="RBLNAutoencoderKLCosmosConfig",
             batch_size=batch_size,
             uses_encoder=self.__class__._vae_uses_encoder,
             height=height,
             width=width,
             num_frames=num_frames,
         )
-        self.safety_checker = self.init_submodule_config(
-            RBLNCosmosSafetyCheckerConfig,
+        self.safety_checker = self.initialize_submodule_config(
             safety_checker,
+            cls_name="RBLNCosmosSafetyCheckerConfig",
             batch_size=batch_size,
             height=height,
             width=width,
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_kandinsky2_2.py

@@ -88,10 +88,14 @@ class RBLNKandinskyV22PipelineBaseConfig(RBLNModelConfig):
         elif (img_height is not None and img_width is None) or (img_height is None and img_width is not None):
             raise ValueError("Both img_height and img_width must be provided together if used")
 
-        self.unet = self.init_submodule_config(RBLNUNet2DConditionModelConfig, unet, sample_size=sample_size)
-        self.movq = self.init_submodule_config(
-            RBLNVQModelConfig,
+        self.unet = self.initialize_submodule_config(
+            unet,
+            cls_name="RBLNUNet2DConditionModelConfig",
+            sample_size=sample_size,
+        )
+        self.movq = self.initialize_submodule_config(
             movq,
+            cls_name="RBLNVQModelConfig",
             batch_size=batch_size,
             sample_size=image_size,  # image size is equal to sample size in vae
             uses_encoder=self._movq_uses_encoder,
@@ -173,14 +177,20 @@ class RBLNKandinskyV22PriorPipelineConfig(RBLNModelConfig):
         accommodate classifier-free guidance.
         """
         super().__init__(**kwargs)
-        self.text_encoder = self.init_submodule_config(
-            RBLNCLIPTextModelWithProjectionConfig, text_encoder, batch_size=batch_size
+        self.text_encoder = self.initialize_submodule_config(
+            text_encoder,
+            cls_name="RBLNCLIPTextModelWithProjectionConfig",
+            batch_size=batch_size,
         )
-        self.image_encoder = self.init_submodule_config(
-            RBLNCLIPVisionModelWithProjectionConfig, image_encoder, batch_size=batch_size
+        self.image_encoder = self.initialize_submodule_config(
+            image_encoder,
+            cls_name="RBLNCLIPVisionModelWithProjectionConfig",
+            batch_size=batch_size,
+        )
+        self.prior = self.initialize_submodule_config(
+            prior,
+            cls_name="RBLNPriorTransformerConfig",
         )
-
-        self.prior = self.init_submodule_config(RBLNPriorTransformerConfig, prior)
 
         # Get default guidance scale from original class to set UNet batch size
         if guidance_scale is None:
@@ -286,18 +296,18 @@ class RBLNKandinskyV22CombinedPipelineBaseConfig(RBLNModelConfig):
         elif (img_height is not None and img_width is None) or (img_height is None and img_width is not None):
             raise ValueError("Both img_height and img_width must be provided together if used")
 
-        self.prior_pipe = self.init_submodule_config(
-            RBLNKandinskyV22PriorPipelineConfig,
+        self.prior_pipe = self.initialize_submodule_config(
             prior_pipe,
+            cls_name="RBLNKandinskyV22PriorPipelineConfig",
             prior=prior_prior,
             image_encoder=prior_image_encoder,
             text_encoder=prior_text_encoder,
             batch_size=batch_size,
             guidance_scale=guidance_scale,
         )
-        self.decoder_pipe = self.init_submodule_config(
-            self._decoder_pipe_cls,
+        self.decoder_pipe = self.initialize_submodule_config(
             decoder_pipe,
+            cls_name=self._decoder_pipe_cls.__name__,
             unet=unet,
             movq=movq,
             batch_size=batch_size,
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion.py

@@ -90,18 +90,22 @@ class RBLNStableDiffusionPipelineBaseConfig(RBLNModelConfig):
         elif (img_height is not None and img_width is None) or (img_height is None and img_width is not None):
             raise ValueError("Both img_height and img_width must be provided together if used")
 
-        self.text_encoder = self.init_submodule_config(RBLNCLIPTextModelConfig, text_encoder, batch_size=batch_size)
-        self.unet = self.init_submodule_config(
-            RBLNUNet2DConditionModelConfig,
+        self.text_encoder = self.initialize_submodule_config(
+            text_encoder,
+            cls_name="RBLNCLIPTextModelConfig",
+            batch_size=batch_size,
+        )
+        self.unet = self.initialize_submodule_config(
             unet,
+            cls_name="RBLNUNet2DConditionModelConfig",
             sample_size=sample_size,
         )
-        self.vae = self.init_submodule_config(
-            RBLNAutoencoderKLConfig,
+        self.vae = self.initialize_submodule_config(
             vae,
+            cls_name="RBLNAutoencoderKLConfig",
             batch_size=batch_size,
             uses_encoder=self.__class__._vae_uses_encoder,
-            sample_size=image_size,  # image size is equal to sample size in vae
+            sample_size=image_size,
         )
 
         # Get default guidance scale from original class to set UNet batch size
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_3.py

@@ -100,27 +100,31 @@ class RBLNStableDiffusion3PipelineBaseConfig(RBLNModelConfig):
 
         max_seq_len = max_seq_len or 256
 
-        self.text_encoder = self.init_submodule_config(
-            RBLNCLIPTextModelWithProjectionConfig, text_encoder, batch_size=batch_size
+        self.text_encoder = self.initialize_submodule_config(
+            text_encoder,
+            cls_name="RBLNCLIPTextModelWithProjectionConfig",
+            batch_size=batch_size,
         )
-        self.text_encoder_2 = self.init_submodule_config(
-            RBLNCLIPTextModelWithProjectionConfig, text_encoder_2, batch_size=batch_size
+        self.text_encoder_2 = self.initialize_submodule_config(
+            text_encoder_2,
+            cls_name="RBLNCLIPTextModelWithProjectionConfig",
+            batch_size=batch_size,
         )
-        self.text_encoder_3 = self.init_submodule_config(
-            RBLNT5EncoderModelConfig,
+        self.text_encoder_3 = self.initialize_submodule_config(
             text_encoder_3,
+            cls_name="RBLNT5EncoderModelConfig",
             batch_size=batch_size,
             max_seq_len=max_seq_len,
             model_input_names=["input_ids"],
         )
-        self.transformer = self.init_submodule_config(
-            RBLNSD3Transformer2DModelConfig,
+        self.transformer = self.initialize_submodule_config(
             transformer,
+            cls_name="RBLNSD3Transformer2DModelConfig",
             sample_size=sample_size,
         )
-        self.vae = self.init_submodule_config(
-            RBLNAutoencoderKLConfig,
+        self.vae = self.initialize_submodule_config(
             vae,
+            cls_name="RBLNAutoencoderKLConfig",
             batch_size=batch_size,
             uses_encoder=self.__class__._vae_uses_encoder,
             sample_size=image_size,
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_xl.py

@@ -93,18 +93,25 @@ class RBLNStableDiffusionXLPipelineBaseConfig(RBLNModelConfig):
         elif (img_height is not None and img_width is None) or (img_height is None and img_width is not None):
             raise ValueError("Both img_height and img_width must be provided together if used")
 
-        self.text_encoder = self.init_submodule_config(RBLNCLIPTextModelConfig, text_encoder, batch_size=batch_size)
-        self.text_encoder_2 = self.init_submodule_config(
-            RBLNCLIPTextModelWithProjectionConfig, text_encoder_2, batch_size=batch_size
+        self.text_encoder = self.initialize_submodule_config(
+            text_encoder,
+            cls_name="RBLNCLIPTextModelConfig",
+            batch_size=batch_size,
+        )
+        self.text_encoder_2 = self.initialize_submodule_config(
+            text_encoder_2,
+            cls_name="RBLNCLIPTextModelWithProjectionConfig",
+            batch_size=batch_size,
         )
-        self.unet = self.init_submodule_config(
-            RBLNUNet2DConditionModelConfig,
+
+        self.unet = self.initialize_submodule_config(
             unet,
+            cls_name="RBLNUNet2DConditionModelConfig",
             sample_size=sample_size,
         )
-        self.vae = self.init_submodule_config(
-            RBLNAutoencoderKLConfig,
+        self.vae = self.initialize_submodule_config(
             vae,
+            cls_name="RBLNAutoencoderKLConfig",
             batch_size=batch_size,
             uses_encoder=self.__class__._vae_uses_encoder,
             sample_size=image_size, # image size is equal to sample size in vae
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/diffusers/pipelines/cosmos/configuration_cosmos_guardrail.py

@@ -15,7 +15,7 @@
 from typing import Any, Optional, Tuple
 
 from ....configuration_utils import RBLNAutoConfig, RBLNModelConfig
-from ....transformers import RBLNLlamaForCausalLMConfig, RBLNSiglipVisionModelConfig
+from ....transformers import RBLNSiglipVisionModelConfig
 
 
 class RBLNVideoSafetyModelConfig(RBLNModelConfig):
@@ -81,30 +81,28 @@ class RBLNCosmosSafetyCheckerConfig(RBLNModelConfig):
 
         tensor_parallel_size = kwargs.get("tensor_parallel_size")
 
-        self.llamaguard3 = self.init_submodule_config(
-            RBLNLlamaForCausalLMConfig,
+        self.llamaguard3 = self.initialize_submodule_config(
             llamaguard3,
+            cls_name="RBLNLlamaForCausalLMConfig",
             batch_size=batch_size,
             tensor_parallel_size=tensor_parallel_size,
             max_seq_len=max_seq_len,
         )
-
-        self.siglip_encoder = self.init_submodule_config(
-            RBLNSiglipVisionModelConfig,
+        self.siglip_encoder = self.initialize_submodule_config(
             siglip_encoder,
+            cls_name="RBLNSiglipVisionModelConfig",
             batch_size=batch_size,
             image_size=(384, 384),
         )
-
-        self.video_safety_model = self.init_submodule_config(
-            RBLNVideoSafetyModelConfig,
+        self.video_safety_model = self.initialize_submodule_config(
             video_safety_model,
+            cls_name="RBLNVideoSafetyModelConfig",
             batch_size=batch_size,
             input_size=1152,
         )
-        self.face_blur_filter = self.init_submodule_config(
-            RBLNRetinaFaceFilterConfig,
+        self.face_blur_filter = self.initialize_submodule_config(
             face_blur_filter,
+            cls_name="RBLNRetinaFaceFilterConfig",
             batch_size=batch_size,
             image_size=image_size,
         )
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/blip_2/configuration_blip_2.py

@@ -15,6 +15,10 @@
 from typing import Any, Optional
 
 from ....configuration_utils import RBLNModelConfig
+from ....utils.logging import get_logger
+
+
+logger = get_logger(__name__)
 
 
 class RBLNBlip2VisionModelConfig(RBLNModelConfig):
@@ -25,6 +29,16 @@ class RBLNBlip2VisionModelConfig(RBLNModelConfig):
     RBLN-optimized BLIP-2 vision encoder models for multimodal tasks.
     """
 
+    def __init__(
+        self,
+        batch_size: Optional[int] = None,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        self.batch_size = batch_size or 1
+        if not isinstance(self.batch_size, int) or self.batch_size < 0:
+            raise ValueError(f"batch_size must be a positive integer, got {self.batch_size}")
+
 
 class RBLNBlip2QFormerModelConfig(RBLNModelConfig):
     """
@@ -36,6 +50,7 @@ class RBLNBlip2QFormerModelConfig(RBLNModelConfig):
 
     def __init__(
         self,
+        batch_size: Optional[int] = None,
         num_query_tokens: Optional[int] = None,
         image_text_hidden_size: Optional[int] = None,
         **kwargs,
@@ -47,11 +62,22 @@ class RBLNBlip2QFormerModelConfig(RBLNModelConfig):
             kwargs: Additional arguments passed to the parent RBLNModelConfig.
         """
         super().__init__(**kwargs)
+        self.batch_size = batch_size or 1
+        if not isinstance(self.batch_size, int) or self.batch_size < 0:
+            raise ValueError(f"batch_size must be a positive integer, got {self.batch_size}")
+
         self.num_query_tokens = num_query_tokens
         self.image_text_hidden_size = image_text_hidden_size
 
 
 class RBLNBlip2ForConditionalGenerationConfig(RBLNModelConfig):
+    """
+    Configuration class for RBLNBlip2ForConditionalGeneration.
+
+    This configuration class stores the configuration parameters specific to
+    RBLN-optimized BLIP-2 models for conditional generation tasks that involve both image and text inputs.
+    """
+
     submodules = ["vision_model", "qformer", "language_model"]
 
     def __init__(
@@ -78,6 +104,12 @@ class RBLNBlip2ForConditionalGenerationConfig(RBLNModelConfig):
         if not isinstance(self.batch_size, int) or self.batch_size < 0:
             raise ValueError(f"batch_size must be a positive integer, got {self.batch_size}")
 
-        self.vision_model = self.init_submodule_config(…)
-        self.qformer = self.init_submodule_config(…)
-        self.language_model = self.init_submodule_config(…)
+        if self.batch_size != 1:
+            logger.warning("Ignore batch_size for Blip2 vision model. It will be set to 1.")
+            logger.warning("Ignore batch_size for Blip2 qformer. It will be set to 1.")
+
+        self.vision_model = self.initialize_submodule_config(
+            submodule_config=vision_model, batch_size=1, force_kwargs=True
+        )
+        self.qformer = self.initialize_submodule_config(submodule_config=qformer, batch_size=1, force_kwargs=True)
+        self.language_model = self.initialize_submodule_config(submodule_config=language_model)
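The BLIP-2 composite config now pins the vision tower and Q-Former to batch size 1 regardless of the requested pipeline batch size, using the new `force_kwargs=True` path. A hedged sketch of what that means in practice; the class and argument names are taken from the hunks above, the top-level import path is assumed from the package layout, and the snippet is illustrative rather than a test from the repository:

```python
# Hedged sketch, not a verbatim test from the repository; the import path is assumed.
from optimum.rbln import RBLNBlip2ForConditionalGenerationConfig

# batch_size=4 is kept for the language-model side, while the vision model and
# Q-Former submodule configs are built with batch_size=1 and force_kwargs=True,
# so a warning ("Ignore batch_size for Blip2 ...") is logged for each of them.
cfg = RBLNBlip2ForConditionalGenerationConfig(batch_size=4)

# Explicitly requesting a different batch size for a forced submodule conflicts
# with the forced value and raises the ValueError added in configuration_utils.py, e.g.:
#   RBLNBlip2ForConditionalGenerationConfig(vision_model={"batch_size": 2})
```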
{optimum_rbln-0.9.1 → optimum_rbln-0.9.2a1}/src/optimum/rbln/transformers/models/blip_2/modeling_blip_2.py

@@ -30,34 +30,30 @@ from transformers.utils import logging
 
 from ....configuration_utils import RBLNCompileConfig, RBLNModelConfig
 from ....modeling import RBLNModel
+from ...utils.rbln_runtime_wrapper import LoopProcessor
 
 
 logger = logging.get_logger(__name__)
 
 if TYPE_CHECKING:
+    import rebel
     from transformers import AutoFeatureExtractor, AutoProcessor, AutoTokenizer
 
 
-class LoopProjector:
-    def __init__(self, language_projection):
-        self.language_projection = language_projection
+class LoopProjector(LoopProcessor):
+    def __init__(self, language_projection: Union[RBLNModel, "rebel.Runtime"]):
+        super().__init__(model=language_projection)
 
-    def …
-        query_output…
+    def _get_batch_size(self, query_output, **kwargs):
+        return query_output.shape[0]
 
-        batch_size = query_output.shape[0]
-        outputs = []
-        for i in range(batch_size):
-            outputs.append(self.language_projection(query_output[i : i + 1]))
-
-        outputs = torch.cat(outputs, dim=0)
-        return outputs
+    def _prepare_inputs_for_iteration(self, index, common_inputs, query_output, **kwargs):
+        query_output_item = query_output[index : index + 1]
+        return ([query_output_item], {})
 
-    def …
-        …
-
-    def __repr__(self) -> str:
-        return repr(self.language_projection)
+    def _process_outputs(self, outputs: list, **kwargs):
+        output = torch.cat(outputs, dim=0)
+        return output
 
 
 class RBLNBlip2VisionModel(RBLNModel):
@@ -68,6 +64,8 @@ class RBLNBlip2VisionModel(RBLNModel):
     on RBLN devices, supporting image encoding for multimodal vision-language tasks.
     """
 
+    _tp_support = False
+
     def get_input_embeddings(self):
         return self.embeddings
 
@@ -96,8 +94,7 @@ class RBLNBlip2VisionModel(RBLNModel):
                 (
                     "pixel_values",
                     [
-                        …
-                        1,
+                        rbln_config.batch_size,
                         model_config.num_channels,
                         model_config.image_size,
                         model_config.image_size,
@@ -147,6 +144,8 @@ class RBLNBlip2QFormerModel(RBLNModel):
     mechanisms for multimodal understanding tasks.
     """
 
+    _tp_support = False
+
     def get_input_embeddings(self):
         return self.embeddings.word_embeddings
 
@@ -200,7 +199,7 @@ class RBLNBlip2QFormerModel(RBLNModel):
                 (
                     "query_embeds",
                     [
-                        1,
+                        rbln_config.batch_size,
                         rbln_config.num_query_tokens,
                         model_config.hidden_size,
                     ],
@@ -209,7 +208,7 @@ class RBLNBlip2QFormerModel(RBLNModel):
                 (
                     "encoder_hidden_states",
                     [
-                        1,
+                        rbln_config.batch_size,
                         # image_text_hidden_size + cls token
                         rbln_config.image_text_hidden_size + 1,
                         model_config.encoder_hidden_size,
                     ],
@@ -219,7 +218,7 @@ class RBLNBlip2QFormerModel(RBLNModel):
                 (
                     "encoder_attention_mask",
                     # image_text_hidden_size + cls token
-                    [1, rbln_config.image_text_hidden_size + 1],
+                    [rbln_config.batch_size, rbln_config.image_text_hidden_size + 1],
                     "int64",
                 ),
             ]
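The ad-hoc `LoopProjector` wrapper is replaced by a subclass of the new `LoopProcessor` helper from `src/optimum/rbln/transformers/utils/rbln_runtime_wrapper.py` (+79 lines, not included in this diff). Based only on the three hooks the subclass overrides above (`_get_batch_size`, `_prepare_inputs_for_iteration`, `_process_outputs`) and the `model` attribute it passes to `super().__init__`, a plausible minimal base class might look like the following; everything beyond those names is an assumption, not the package's actual implementation:

```python
# Hedged sketch of what the LoopProcessor base class could look like; the real
# implementation ships in rbln_runtime_wrapper.py and is not shown in this diff.
class LoopProcessor:
    def __init__(self, model):
        # A compiled single-batch runtime (or RBLNModel) that is invoked once per sample.
        self.model = model

    def _get_batch_size(self, *args, **kwargs):
        raise NotImplementedError

    def _prepare_inputs_for_iteration(self, index, common_inputs, *args, **kwargs):
        raise NotImplementedError

    def _process_outputs(self, outputs, **kwargs):
        raise NotImplementedError

    def __call__(self, *args, **kwargs):
        # Run the wrapped runtime sample by sample and merge the per-sample results.
        batch_size = self._get_batch_size(*args, **kwargs)
        common_inputs = kwargs
        outputs = []
        for index in range(batch_size):
            iter_args, iter_kwargs = self._prepare_inputs_for_iteration(index, common_inputs, *args, **kwargs)
            outputs.append(self.model(*iter_args, **iter_kwargs))
        return self._process_outputs(outputs, **kwargs)

    def __repr__(self):
        return repr(self.model)
```

This matches how `LoopProjector` above splits `query_output` into per-sample slices and concatenates the projected outputs with `torch.cat`, while leaving the batching loop itself to the shared wrapper.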