optimum-rbln 0.2.1a1__tar.gz → 0.2.1a2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/rbln_trigger_on_pr.yaml +1 -1
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/PKG-INFO +2 -2
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/pyproject.toml +1 -1
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/__init__.py +2 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/__version__.py +1 -1
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/ops/attn.py +4 -4
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/__init__.py +2 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/__init__.py +2 -2
- optimum_rbln-0.2.1a2/src/optimum/rbln/transformers/models/bert/__init__.py +15 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/bert/modeling_bert.py +5 -1
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +6 -2
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py +15 -32
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/t5/t5_architecture.py +29 -6
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/tests/test_llm.py +12 -12
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/tests/test_transformers.py +13 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/uv.lock +17 -49
- optimum_rbln-0.2.1a1/src/optimum/rbln/transformers/models/bert/__init__.py +0 -15
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/ISSUE_TEMPLATE/config.yml +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/ISSUE_TEMPLATE/model_request.md +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/pull_request_template.md +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/scripts/auto_code_review.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/scripts/validate_pr_checklist.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/auto_code_review.yml +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/check_code_quality.yml +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/deploy-on-tag.yaml +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/deploy.yaml +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/pr-title-check.yaml +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/pr_checklist_validator.yml +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/rbln_dispatch_pytest.yaml +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/rbln_optimum_inference_test.yaml +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/rbln_optimum_pytest.yaml +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.gitignore +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/CODE_OF_CONDUCT.md +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/CONTRIBUTING.md +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/LICENSE +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/README.md +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/assets/rbln_logo.png +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/audio-classification/run_ast_audio_classification.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/depth-estimation/run_dpt.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/image-classification/run_image_classification.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/image-classification/run_vit_image_classification.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/image-to-text/run_llava_next_image_to_text.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/question-answering/run_question_answering.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/speech-recognition/run_wav2vec2.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/speech-recognition/run_whisper.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/stable-diffusion/run_stable_diffusion.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/stable-diffusion/run_stable_diffusion_controlnet.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/stable-diffusion/run_stable_diffusion_img2img.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/stable-diffusion/run_stable_diffusion_img2img_controlnet.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/stable-diffusion/run_stable_diffusion_inpaint.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/stable-diffusion/run_stable_diffusion_lora.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/stable-diffusion/run_stable_diffusion_multicontrolnet.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/text-classification/run_bge_m3_text_classification.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/text-classification/run_bge_reranker_v2_m3_text_classification.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/text-classification/run_secureBERT.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/text-classification/run_t5_classification.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/text-classification/run_twitter_roberta_text_classification.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/text2text-generation/run_bart_text2text_generation.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/text2text-generation/run_llama_peft.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/examples/text2text-generation/run_llama_text2text_generation.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/scripts/uv-lock.sh +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/scripts/uv-sync.sh +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/modeling_diffusers.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/models/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/models/autoencoders/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/models/autoencoders/autoencoder_kl.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/models/autoencoders/vae.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/models/controlnet.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/models/transformers/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/models/transformers/transformer_sd3.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/models/unets/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/models/unets/unet_2d_condition.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/controlnet/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/modeling.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/modeling_base.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/modeling_config.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/ops/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/ops/flash_attn.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/ops/kv_cache_update.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/modeling_alias.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/modeling_generic.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/modeling_rope_utils.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/auto/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/auto/auto_factory.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/auto/modeling_auto.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/bart/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/bart/bart_architecture.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/bart/modeling_bart.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/clip/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/clip/modeling_clip.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/decoderonly/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/dpt/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/dpt/modeling_dpt.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/exaone/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/exaone/exaone_architecture.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/exaone/modeling_exaone.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/gemma/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/gemma/gemma_architecture.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/gemma/modeling_gemma.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/gpt2/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/gpt2/modeling_gpt2.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/llama/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/llama/llama_architecture.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/llama/modeling_llama.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/llava_next/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/midm/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/midm/midm_architecture.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/midm/modeling_midm.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/mistral/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/mistral/mistral_architecture.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/mistral/modeling_mistral.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/phi/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/phi/modeling_phi.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/phi/phi_architecture.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/qwen2/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/qwen2/qwen2_architecture.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/seq2seq/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/seq2seq/modeling_seq2seq.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/t5/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/t5/modeling_t5.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/wav2vec2/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/whisper/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/whisper/generation_whisper.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/whisper/modeling_whisper.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/whisper/whisper_architecture.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/xlm_roberta/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/utils/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/utils/rbln_quantization.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/utils/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/utils/decorator_utils.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/utils/hub.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/utils/import_utils.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/utils/logging.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/utils/model_utils.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/utils/runtime_utils.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/utils/save_utils.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/utils/submodule.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/tests/__init__.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/tests/psnr.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/tests/requirements_sdxl.txt +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/tests/run_stable_diffusion_xl_base.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/tests/test_base.py +0 -0
- {optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/tests/test_diffusers.py +0 -0
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/.github/workflows/rbln_trigger_on_pr.yaml
RENAMED
@@ -9,7 +9,7 @@ env:
   REBEL_PYPI_ENDPOINT: ${{ vars.REBEL_PYPI_INTERNAL_ENDPOINT }}
   REBEL_PYPI_USERNAME: ${{ secrets.REBEL_PYPI_USERNAME }}
   REBEL_PYPI_PASSWORD: ${{ secrets.REBEL_PYPI_PASSWORD }}
-  REBEL_COMPILER_VERSION: 0.7.
+  REBEL_COMPILER_VERSION: 0.7.2.dev151+g19f099fd

 jobs:
   check-rebel-compiler-version:
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: optimum-rbln
-Version: 0.2.1a1
+Version: 0.2.1a2
 Summary: Optimum RBLN is the interface between the Hugging Face Transformers and Diffusers libraries and RBLN accelerators. It provides a set of tools enabling easy model loading and inference on single and multiple rbln device settings for different downstream tasks.
 Project-URL: Homepage, https://rebellions.ai
 Project-URL: Documentation, https://docs.rbln.ai
@@ -28,7 +28,7 @@ Requires-Dist: packaging>=24.1
 Requires-Dist: torch<=2.5.1
 Requires-Dist: torchaudio<=2.5.1
 Requires-Dist: torchvision<=0.20.1
-Requires-Dist: transformers==4.
+Requires-Dist: transformers==4.48.3
 Description-Content-Type: text/markdown
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/__init__.py
RENAMED
@@ -71,6 +71,7 @@ _import_structure = {
         "RBLNRobertaForSequenceClassification",
         "RBLNRobertaForMaskedLM",
         "RBLNViTForImageClassification",
+        "RBLNBertForMaskedLM",
     ],
     "diffusers": [
         "RBLNStableDiffusionPipeline",
@@ -141,6 +142,7 @@ if TYPE_CHECKING:
         RBLNAutoModelForVision2Seq,
         RBLNBartForConditionalGeneration,
         RBLNBartModel,
+        RBLNBertForMaskedLM,
         RBLNBertForQuestionAnswering,
         RBLNBertModel,
         RBLNCLIPTextModel,
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/ops/attn.py
RENAMED
@@ -152,16 +152,16 @@ def register_rbln_custom_attention_add_softmax():
         """
         return (
             q,
-            torch.empty(
-            torch.empty(
+            torch.empty(*kcache.shape, device=kcache.device),
+            torch.empty(*vcache.shape, device=vcache.device),
         )

     @register_fake("rbln_custom_ops::attn_decode_add_softmax")
     def attn_decode_add_softmax_abstract(q, k, v, m, kcache, vcache, seq, partition):
         return (
             q,
-            torch.empty(
-            torch.empty(
+            torch.empty(*kcache.shape, device=kcache.device),
+            torch.empty(*vcache.shape, device=vcache.device),
         )

     torch.library.define(
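The fix above makes the fake kernels allocate their outputs with the cache tensors' own shape and device. For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of how such a custom op and its fake (meta) kernel fit together; the op name and schema below are illustrative stand-ins, not the package's actual registration:

import torch
from torch.library import register_fake  # torch >= 2.4 is assumed

# Hypothetical op, reduced from the rbln_custom_ops registrations above.
torch.library.define(
    "demo_ops::attn_decode",
    "(Tensor q, Tensor k, Tensor v, Tensor kcache, Tensor vcache) -> (Tensor, Tensor, Tensor)",
)

@register_fake("demo_ops::attn_decode")
def attn_decode_abstract(q, k, v, kcache, vcache):
    # A fake kernel never runs real math; tracing/compilation only reads
    # shapes, dtypes, and devices. Allocating with the caches' own shape
    # and device keeps the traced graph consistent with the real kernel.
    return (
        q,
        torch.empty(*kcache.shape, device=kcache.device),
        torch.empty(*vcache.shape, device=vcache.device),
    )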
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/__init__.py
RENAMED
@@ -35,6 +35,7 @@ _import_structure = {
         "RBLNBartForConditionalGeneration",
         "RBLNBartModel",
         "RBLNBertModel",
+        "RBLNBertForMaskedLM",
         "RBLNBertForQuestionAnswering",
         "RBLNCLIPTextModel",
         "RBLNCLIPTextModelWithProjection",
@@ -92,6 +93,7 @@ if TYPE_CHECKING:
         RBLNAutoModelForVision2Seq,
         RBLNBartForConditionalGeneration,
         RBLNBartModel,
+        RBLNBertForMaskedLM,
         RBLNBertForQuestionAnswering,
         RBLNBertModel,
         RBLNCLIPTextModel,
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/__init__.py
RENAMED
@@ -33,7 +33,7 @@ _import_structure = {
         "RBLNAutoModelForVision2Seq",
     ],
     "bart": ["RBLNBartForConditionalGeneration", "RBLNBartModel"],
-    "bert": ["RBLNBertModel", "RBLNBertForQuestionAnswering"],
+    "bert": ["RBLNBertModel", "RBLNBertForQuestionAnswering", "RBLNBertForMaskedLM"],
     "clip": ["RBLNCLIPTextModel", "RBLNCLIPTextModelWithProjection", "RBLNCLIPVisionModel"],
     "dpt": ["RBLNDPTForDepthEstimation"],
     "exaone": ["RBLNExaoneForCausalLM"],
@@ -67,7 +67,7 @@ if TYPE_CHECKING:
         RBLNAutoModelForVision2Seq,
     )
     from .bart import RBLNBartForConditionalGeneration, RBLNBartModel
-    from .bert import RBLNBertForQuestionAnswering, RBLNBertModel
+    from .bert import RBLNBertForMaskedLM, RBLNBertForQuestionAnswering, RBLNBertModel
     from .clip import RBLNCLIPTextModel, RBLNCLIPTextModelWithProjection, RBLNCLIPVisionModel
     from .dpt import RBLNDPTForDepthEstimation
     from .exaone import RBLNExaoneForCausalLM
optimum_rbln-0.2.1a2/src/optimum/rbln/transformers/models/bert/__init__.py
ADDED
@@ -0,0 +1,15 @@
+# Copyright 2025 Rebellions Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .modeling_bert import RBLNBertForMaskedLM, RBLNBertForQuestionAnswering, RBLNBertModel
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/bert/modeling_bert.py
RENAMED
@@ -20,7 +20,7 @@ from transformers import PretrainedConfig

 from ....modeling import RBLNModel
 from ....modeling_config import RBLNCompileConfig, RBLNConfig
-from ...modeling_generic import RBLNModelForQuestionAnswering
+from ...modeling_generic import RBLNModelForMaskedLM, RBLNModelForQuestionAnswering


 logger = logging.getLogger(__name__)
@@ -100,5 +100,9 @@ class RBLNBertModel(RBLNModel):
         return rbln_config


+class RBLNBertForMaskedLM(RBLNModelForMaskedLM):
+    rbln_model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
+
+
 class RBLNBertForQuestionAnswering(RBLNModelForQuestionAnswering):
     rbln_model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
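Together with the `__init__.py` additions above, this is the whole surface of the new masked-LM support: a thin subclass that pins the compiled input signature. Usage should mirror the package's other generic classes; the sketch below is hedged — the checkpoint id is arbitrary and the optimum-style `export=True` compile-on-load flag is assumed rather than confirmed by this diff:

from transformers import AutoTokenizer
from optimum.rbln import RBLNBertForMaskedLM

model_id = "bert-base-uncased"  # any BERT masked-LM checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Compile for the RBLN NPU on first load (assumed flag); a precompiled
# model directory could be reloaded without it.
model = RBLNBertForMaskedLM.from_pretrained(model_id, export=True)

# The three inputs match rbln_model_input_names above. Note a fixed-shape
# compile may additionally require padding to the compiled sequence length.
inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
logits = model(**inputs).logits
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero()[0, 1]
print(tokenizer.decode(logits[0, mask_pos].argmax()))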
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py
RENAMED
@@ -544,15 +544,19 @@ class DecoderOnlyAttention(nn.Module):
         super().__init__()
         self._original_mod = self_attn
         self.layer_idx = self_attn.layer_idx
-        self.num_heads = self._original_mod
+        self.num_heads = getattr(self._original_mod, "num_heads", None) or getattr(
+            self._original_mod.config, "num_attention_heads"
+        )
         self.head_dim = self._original_mod.head_dim
         self._phase = "prefill"
         self.scale = torch.tensor(self.get_attn_scale())

         if hasattr(self._original_mod, "num_key_value_heads"):
             self.num_key_value_heads = self._original_mod.num_key_value_heads
+        elif hasattr(self._original_mod, "config") and hasattr(self._original_mod.config, "num_key_value_heads"):
+            self.num_key_value_heads = self._original_mod.config.num_key_value_heads
         else:
-            self.num_key_value_heads = self.
+            self.num_key_value_heads = self.num_heads

         self.attention = self.get_attention()
         self.__post_init__()
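The likely motivation (hedged — inferred from the transformers==4.48.3 pin elsewhere in this diff) is that newer transformers releases stopped exposing head counts as attributes on the attention modules themselves, leaving them only on the config. A toy stand-in showing the same fallback order:

from types import SimpleNamespace

def resolve_heads(self_attn):
    # Module attribute first, then the module's config — the order used above.
    num_heads = getattr(self_attn, "num_heads", None) or self_attn.config.num_attention_heads
    if hasattr(self_attn, "num_key_value_heads"):
        num_kv = self_attn.num_key_value_heads
    elif hasattr(self_attn.config, "num_key_value_heads"):
        num_kv = self_attn.config.num_key_value_heads
    else:
        num_kv = num_heads  # plain multi-head attention: one KV head per query head
    return num_heads, num_kv

# New-style module: head counts live only on the config (GQA with 8 KV heads).
attn = SimpleNamespace(config=SimpleNamespace(num_attention_heads=32, num_key_value_heads=8))
assert resolve_heads(attn) == (32, 8)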
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py
RENAMED
@@ -420,7 +420,7 @@ class Seq2SeqSelfAttention(nn.Module):
         pass

     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int) -> torch.Tensor:
-        return tensor.view(bsz,
+        return tensor.view(bsz, seq_len, 1, self.num_heads, self.head_dim).transpose(1, 3)

     def projection(self, hidden_states) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
         """Projects input hidden states into query, key, and value representations.
@@ -450,38 +450,21 @@ class Seq2SeqSelfAttention(nn.Module):
         key_states = self._shape(key_states, -1, bsz)
         value_states = self._shape(value_states, -1, bsz)

-
-
-
-
-
-
-
-
-
-
-
-
-                query_state,
-                key_state,
-                value_state,
-                attn_mask,
-                past_key_state,
-                past_value_state,
-                cache_position[b_idx][0],
-                torch.tensor(1.0, dtype=torch.float32),  # scale
-            )
-
-            attn_output = attn_output.view(1, self.num_heads, -1, self.head_dim).transpose(1, 2)
-            attn_output = attn_output.reshape(1, -1, self.num_heads * self.head_dim)
-
-            all_key_states.append(key_state.squeeze(2))
-            all_value_states.append(value_state.squeeze(2))
-            all_attn_output.append(attn_output)
+        attn_output, key_states, value_states = self.attn_decode(
+            query_states,
+            key_states,
+            value_states,
+            attention_mask.unsqueeze(
+                2
+            ),  # Unsqueeze group axis since CustomKernel expects it for group query attention
+            past_key_value[0].view(bsz, self.num_heads, 1, -1, self.head_dim),
+            past_key_value[1].view(bsz, self.num_heads, 1, -1, self.head_dim),
+            cache_position.squeeze(1),
+            torch.tensor(1.0, dtype=torch.float32),  # scale
+        )

-
-
-        attn_output = torch.cat(all_attn_output, dim=0)
+        attn_output = attn_output.view(bsz, self.num_heads, -1, self.head_dim).transpose(1, 2)
+        attn_output = attn_output.reshape(bsz, -1, self.num_heads * self.head_dim)

         attn_output = self.out_proj(attn_output)
         present_key_value = (key_states, value_states)
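The rewrite above replaces a per-batch Python loop (note the dropped `b_idx` indexing and the `all_*` accumulators) with a single batched `attn_decode` call, so the output reshapes now use `bsz` instead of a hard-coded 1. A quick self-contained check of the `_shape` layout the kernel expects; the concrete dimensions are illustrative only:

import torch

bsz, seq_len, num_heads, head_dim = 2, 8, 4, 16
hidden = torch.randn(bsz, seq_len, num_heads * head_dim)

# Same layout as _shape() above: a singleton axis is inserted because the
# custom kernel expects an explicit group axis for grouped-query attention.
shaped = hidden.view(bsz, seq_len, 1, num_heads, head_dim).transpose(1, 3)
assert shaped.shape == (bsz, num_heads, 1, seq_len, head_dim)

# Inverse of the output reshape applied after the kernel call above
# (reshape rather than view here, since the transpose breaks contiguity).
out = shaped.reshape(bsz, num_heads, -1, head_dim).transpose(1, 2).reshape(bsz, -1, num_heads * head_dim)
assert out.shape == hidden.shape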
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/src/optimum/rbln/transformers/models/t5/t5_architecture.py
RENAMED
@@ -147,6 +147,11 @@ class T5CrossAttention(nn.Module):
     def __init__(self, attn):
         super().__init__()
         self.attn = attn
+        self.q = attn.q
+        self.o = attn.o
+        self.n_heads = attn.n_heads
+        self.key_value_proj_dim = attn.key_value_proj_dim
+        self.inner_dim = attn.inner_dim

     def forward(
         self,
@@ -155,9 +160,27 @@
         attention_mask: torch.Tensor = None,
         key_value_states: torch.Tensor = None,
     ):
-
-
-
-
-
-
+        batch_size = hidden_states.shape[0]
+
+        query_states = self.q(hidden_states)
+        query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
+
+        # reuse k,v, cross_attentions
+        key_states = past_key_value[0]
+        value_states = past_key_value[1]
+
+        # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
+        scores = torch.matmul(query_states, key_states.transpose(3, 2))
+        scores += attention_mask
+
+        # (batch_size, n_heads, seq_length, key_length)
+        attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
+        attn_output = torch.matmul(attn_weights, value_states)
+
+        attn_output = attn_output.transpose(1, 2).contiguous()
+        attn_output = attn_output.view(batch_size, -1, self.inner_dim)
+        attn_output = self.o(attn_output)
+
+        outputs = (attn_output, past_key_value)
+
+        return outputs
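The new forward above inlines T5's cross-attention over the reused K/V cache. The equivalence its comment claims between the ONNX-friendly matmul form and the einsum it replaces is easy to verify in isolation (shapes here are arbitrary):

import torch

q = torch.randn(2, 4, 5, 8)   # (batch, n_heads, q_len, head_dim)
k = torch.randn(2, 4, 7, 8)   # (batch, n_heads, k_len, head_dim)

scores = torch.matmul(q, k.transpose(3, 2))        # form used above
reference = torch.einsum("bnqd,bnkd->bnqk", q, k)  # the einsum it replaces
assert torch.allclose(scores, reference, atol=1e-5)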
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/tests/test_llm.py
RENAMED
@@ -47,7 +47,7 @@ class LLMTest:

     def get_inputs(self):
         inputs = self.tokenizer(self.PROMPT, return_tensors="pt")
-        inputs["max_new_tokens"] =
+        inputs["max_new_tokens"] = 20
         inputs["do_sample"] = False
         return inputs

@@ -62,7 +62,7 @@ class LLMTest:
 class TestQwen2Model(LLMTest.TestLLM):
     RBLN_CLASS = RBLNQwen2ForCausalLM
     HF_MODEL_ID = "Qwen/Qwen2-0.5B-Instruct"
-    EXPECTED_OUTPUT = " I am a 30-year-old woman who has been living with lupus"
+    EXPECTED_OUTPUT = " I am a 30-year-old woman who has been living with lupus for over 1"
     HF_CONFIG_KWARGS = {"max_position_embeddings": 1024}


@@ -70,7 +70,7 @@ class TestLlamaForCausalLM(LLMTest.TestLLM):
     RBLN_CLASS = RBLNLlamaForCausalLM
     HF_MODEL_ID = "afmck/testing-llama-tiny"
     TEST_LEVEL = TestLevel.ESSENTIAL
-    EXPECTED_OUTPUT = "reress makefable R����
+    EXPECTED_OUTPUT = "reress makefable R���� noethetsshss rechoolso�"
     HF_CONFIG_KWARGS = {"num_hidden_layers": 1, "max_position_embeddings": 1024}

     def get_inputs(self):
@@ -83,7 +83,7 @@ class TestLlamaForCausalLM_Flash(LLMTest.TestLLM):
     RBLN_CLASS = RBLNLlamaForCausalLM
     HF_MODEL_ID = "afmck/testing-llama-tiny"
     TEST_LEVEL = TestLevel.ESSENTIAL
-    EXPECTED_OUTPUT = "reress makefable R����
+    EXPECTED_OUTPUT = "reress makefable R���� noethetsshss rechoolso�"
     HF_CONFIG_KWARGS = {"num_hidden_layers": 1, "max_position_embeddings": 8192}
     RBLN_CLASS_KWARGS = {"rbln_config": {"attn_impl": "flash_attn", "kvcache_partition_len": 4096}}

@@ -96,7 +96,9 @@ class TestLlamaForCausalLM_Flash(LLMTest.TestLLM):
 class TestGPT2LMHeadModel(LLMTest.TestLLM):
     RBLN_CLASS = RBLNGPT2LMHeadModel
     # TEST_LEVEL = TestLevel.FULL
-    EXPECTED_OUTPUT =
+    EXPECTED_OUTPUT = (
+        " What kind kind kind kind kind kind kind kind kind kind kind kind kind kind kind kind kind kind kind"
+    )
     HF_MODEL_ID = "openai-community/gpt2"
     HF_CONFIG_KWARGS = {"n_layer": 1, "max_position_embeddings": 1024}

@@ -106,7 +108,7 @@ class TestPhiForCausalLM(LLMTest.TestLLM):

     # HF_MODEL_ID = "hf-internal-testing/tiny-random-PhiForCausalLM"
     HF_MODEL_ID = "microsoft/phi-2"
-    EXPECTED_OUTPUT = "\nAnswer: Theorettebrates']['<<<
+    EXPECTED_OUTPUT = "\nAnswer: Theorettebrates']['<<<urlskolegateezzingrill"
     HF_CONFIG_KWARGS = {"num_hidden_layers": 1, "max_position_embeddings": 1024, "trust_remote_code": True}


@@ -114,7 +116,7 @@ class TestExaoneForCausalLM(LLMTest.TestLLM):
     RBLN_CLASS = RBLNExaoneForCausalLM
     # HF_MODEL_ID = "katuni4ka/tiny-random-exaone"
     HF_MODEL_ID = "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct"
-    EXPECTED_OUTPUT = "
+    EXPECTED_OUTPUT = "????????????????????"
     HF_CONFIG_KWARGS = {"num_hidden_layers": 1, "max_position_embeddings": 1024, "trust_remote_code": True}


@@ -134,7 +136,7 @@ class TestT5Model(LLMTest.TestLLM):
         inputs = self.tokenizer(
             self.PROMPT, padding="max_length", max_length=512, truncation=True, return_tensors="pt"
         )
-        inputs["max_new_tokens"] =
+        inputs["max_new_tokens"] = 20
         inputs["do_sample"] = False
         inputs["num_beams"] = 1
         return inputs
@@ -158,16 +160,14 @@ class TestBartModel(LLMTest.TestLLM):
     }
     RBLN_CLASS_KWARGS = {"rbln_config": {"enc_max_seq_len": 512, "dec_max_seq_len": 512}}
     PROMPT = "summarize: studies have shown that owning a dog is good for you"
-    EXPECTED_OUTPUT = (
-        "InsteadInsteadInsteadHoweverHoweverHoweverAlthoughAlthoughAlthoughWhileWhileWhileAlthoughAlthough"
-    )
+    EXPECTED_OUTPUT = "InsteadInsteadInsteadHoweverHoweverHoweverAlthoughAlthoughAlthoughWhileWhileWhileAlthoughAlthoughHoweverHoweverManyMany"
     TEST_LEVEL = TestLevel.ESSENTIAL

     def get_inputs(self):
         inputs = self.tokenizer(
             self.PROMPT, padding="max_length", max_length=512, truncation=True, return_tensors="pt"
         )
-        inputs["max_new_tokens"] =
+        inputs["max_new_tokens"] = 20
         inputs["do_sample"] = False
         inputs["num_beams"] = 1
         return inputs
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/tests/test_transformers.py
RENAMED
@@ -7,6 +7,7 @@ from transformers import T5EncoderModel

 from optimum.rbln import (
     RBLNASTForAudioClassification,
+    RBLNBertForMaskedLM,
     RBLNBertForQuestionAnswering,
     RBLNCLIPTextModel,
     RBLNDPTForDepthEstimation,
@@ -23,6 +24,7 @@ from optimum.rbln.transformers.models.auto.modeling_auto import (
     RBLNAutoModelForCTC,
     RBLNAutoModelForDepthEstimation,
     RBLNAutoModelForImageClassification,
+    RBLNAutoModelForMaskedLM,
     RBLNAutoModelForQuestionAnswering,
     RBLNAutoModelForSequenceClassification,
     RBLNAutoModelForSpeechSeq2Seq,
@@ -98,6 +100,17 @@ class TestBertModel(BaseTest.TestModel):
     }


+class TestBertForMaskedLM(BaseTest.TestModel):
+    RBLN_AUTO_CLASS = RBLNAutoModelForMaskedLM
+    RBLN_CLASS = RBLNBertForMaskedLM
+    HF_MODEL_ID = "hf-internal-testing/tiny-random-BertForMaskedLM"
+    GENERATION_KWARGS = {
+        "input_ids": RANDOM_INPUT_IDS,
+        "attention_mask": RANDOM_ATTN_MASK,
+        "token_type_ids": RANDOM_TOKEN_TYPE_IDS,
+    }
+
+
 class TestDPTModel(BaseTest.TestModel):
     RBLN_AUTO_CLASS = RBLNAutoModelForDepthEstimation
     RBLN_CLASS = RBLNDPTForDepthEstimation
{optimum_rbln-0.2.1a1 → optimum_rbln-0.2.1a2}/uv.lock
RENAMED
@@ -996,7 +996,7 @@ requires-dist = [
     { name = "torchaudio", marker = "sys_platform == 'darwin'", specifier = "<=2.5.1" },
     { name = "torchvision", marker = "sys_platform != 'darwin'", specifier = "<=0.20.1", index = "https://download.pytorch.org/whl/cpu" },
     { name = "torchvision", marker = "sys_platform == 'darwin'", specifier = "<=0.20.1" },
-    { name = "transformers", specifier = "==4.
+    { name = "transformers", specifier = "==4.48.3" },
 ]

 [package.metadata.requires-dev]
@@ -1637,55 +1637,23 @@ wheels = [

 [[package]]
 name = "tokenizers"
-version = "0.20.3"
+version = "0.21.0"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "huggingface-hub", marker = "sys_platform == 'linux'" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/
-    { url = "https://files.pythonhosted.org/packages/1a/74/62ad983e8ea6a63e04ed9c5be0b605056bf8aac2f0125f9b5e0b3e2b89fa/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff1ef8bd47a02b0dc191688ccb4da53600df5d4c9a05a4b68e1e3de4823e78eb", size = 3086138 },
-    { url = "https://files.pythonhosted.org/packages/6b/ac/4637ba619db25094998523f9e6f5b456e1db1f8faa770a3d925d436db0c3/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:444d188186eab3148baf0615b522461b41b1f0cd58cd57b862ec94b6ac9780f1", size = 3098076 },
-    { url = "https://files.pythonhosted.org/packages/58/ce/9793f2dc2ce529369807c9c74e42722b05034af411d60f5730b720388c7d/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37c04c032c1442740b2c2d925f1857885c07619224a533123ac7ea71ca5713da", size = 3379650 },
-    { url = "https://files.pythonhosted.org/packages/50/f6/2841de926bc4118af996eaf0bdf0ea5b012245044766ffc0347e6c968e63/tokenizers-0.20.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453c7769d22231960ee0e883d1005c93c68015025a5e4ae56275406d94a3c907", size = 2994005 },
-    { url = "https://files.pythonhosted.org/packages/a3/b2/00915c4fed08e9505d37cf6eaab45b12b4bff8f6719d459abcb9ead86a4b/tokenizers-0.20.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4bb31f7b2847e439766aaa9cc7bccf7ac7088052deccdb2275c952d96f691c6a", size = 8977488 },
-    { url = "https://files.pythonhosted.org/packages/e9/ac/1c069e7808181ff57bcf2d39e9b6fbee9133a55410e6ebdaa89f67c32e83/tokenizers-0.20.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:843729bf0f991b29655a069a2ff58a4c24375a553c70955e15e37a90dd4e045c", size = 9294935 },
-    { url = "https://files.pythonhosted.org/packages/27/37/d108df55daf4f0fcf1f58554692ff71687c273d870a34693066f0847be96/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abe4e08c7d0cd6154c795deb5bf81d2122f36daf075e0c12a8b050d824ef0a64", size = 2898389 },
-    { url = "https://files.pythonhosted.org/packages/b2/27/32f29da16d28f59472fa7fb38e7782069748c7e9ab9854522db20341624c/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca94fc1b73b3883c98f0c88c77700b13d55b49f1071dfd57df2b06f3ff7afd64", size = 2795866 },
-    { url = "https://files.pythonhosted.org/packages/29/4e/8a9a3c89e128c4a40f247b501c10279d2d7ade685953407c4d94c8c0f7a7/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef279c7e239f95c8bdd6ff319d9870f30f0d24915b04895f55b1adcf96d6c60d", size = 3085446 },
-    { url = "https://files.pythonhosted.org/packages/b4/3b/a2a7962c496ebcd95860ca99e423254f760f382cd4bd376f8895783afaf5/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16384073973f6ccbde9852157a4fdfe632bb65208139c9d0c0bd0176a71fd67f", size = 3094378 },
-    { url = "https://files.pythonhosted.org/packages/1f/f4/a8a33f0192a1629a3bd0afcad17d4d221bbf9276da4b95d226364208d5eb/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:312d522caeb8a1a42ebdec87118d99b22667782b67898a76c963c058a7e41d4f", size = 3385755 },
-    { url = "https://files.pythonhosted.org/packages/9e/65/c83cb3545a65a9eaa2e13b22c93d5e00bd7624b354a44adbdc93d5d9bd91/tokenizers-0.20.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2b7cb962564785a83dafbba0144ecb7f579f1d57d8c406cdaa7f32fe32f18ad", size = 2997679 },
-    { url = "https://files.pythonhosted.org/packages/55/e9/a80d4e592307688a67c7c59ab77e03687b6a8bd92eb5db763a2c80f93f57/tokenizers-0.20.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:124c5882ebb88dadae1fc788a582299fcd3a8bd84fc3e260b9918cf28b8751f5", size = 8989296 },
-    { url = "https://files.pythonhosted.org/packages/90/af/60c957af8d2244321124e893828f1a4817cde1a2d08d09d423b73f19bd2f/tokenizers-0.20.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2b6e54e71f84c4202111a489879005cb14b92616a87417f6c102c833af961ea2", size = 9303621 },
-    { url = "https://files.pythonhosted.org/packages/b0/39/073836c1d73e63268b1c67a682a8ba23e2688a43e737166be45ab8243701/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7eb2fb1c432f5746b22f8a7f09fc18c4156cb0031c77f53cb19379d82d43297a", size = 2893676 },
-    { url = "https://files.pythonhosted.org/packages/c1/d9/b9ff819c3df4bc73ad93629804f7b85321a78bc2da4f54fb774a90e995c6/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfa8d029bb156181b006643309d6b673615a24e4ed24cf03aa191d599b996f51", size = 2804173 },
-    { url = "https://files.pythonhosted.org/packages/3e/d5/6b2b519ba2d9a6d3435f22918f0ad5850c40cf5357f6d989e6d68ef40fb9/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f90549622de3bf476ad9f1dd6f3f952ec3ed6ab8615ae88ef060d0c5bfad55d", size = 3086866 },
-    { url = "https://files.pythonhosted.org/packages/01/e1/d96e90ef872dd9b3a4b7a78874411f1c48476019f95a87a2cfd54c470a57/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1d469c74eebf5c43fd61cd9b030e271d17198edd7bd45392e03a3c091d7d6d4", size = 3099004 },
-    { url = "https://files.pythonhosted.org/packages/0c/6a/a94248dc5915907e18d55c9739cd018f5aeb4146f198622f45f9748dcb9f/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bee8f53b2594749f4460d53253bae55d718f04e9b633efa0f5df8938bd98e4f0", size = 3381574 },
-    { url = "https://files.pythonhosted.org/packages/29/9e/c95f8821d6bc93eba7c5db95e6299c009db523d1c646da8563b42ad892c4/tokenizers-0.20.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:938441babf3e5720e4459e306ef2809fb267680df9d1ff2873458b22aef60248", size = 2994953 },
-    { url = "https://files.pythonhosted.org/packages/95/ff/01fdcf9a77776730baf63a9f66adf75c3aa4bdb1bdc77c7d1a3e03b2a25e/tokenizers-0.20.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7310ab23d7b0caebecc0e8be11a1146f320f5f07284000f6ea54793e83de1b75", size = 8977698 },
-    { url = "https://files.pythonhosted.org/packages/ef/2d/8b823741c64e9726b82076fa09f6d66285b61bd2c77e109871415b1ed9e2/tokenizers-0.20.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:16121eb030a2b13094cfec936b0c12e8b4063c5f839591ea7d0212336d8f9921", size = 9295649 },
-    { url = "https://files.pythonhosted.org/packages/8e/c1/6af62ef61316f33ecf785bbb2bee4292f34ea62b491d4480ad9b09acf6b6/tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39270a7050deaf50f7caff4c532c01b3c48f6608d42b3eacdebdc6795478c8df", size = 2897936 },
-    { url = "https://files.pythonhosted.org/packages/9a/0b/c076b2ff3ee6dc70c805181fbe325668b89cfee856f8dfa24cc9aa293c84/tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e005466632b1c5d2d2120f6de8aa768cc9d36cd1ab7d51d0c27a114c91a1e6ee", size = 3082688 },
-    { url = "https://files.pythonhosted.org/packages/0a/60/56510124933136c2e90879e1c81603cfa753ae5a87830e3ef95056b20d8f/tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07962340b36189b6c8feda552ea1bfeee6cf067ff922a1d7760662c2ee229e5", size = 2998924 },
-    { url = "https://files.pythonhosted.org/packages/68/60/4107b618b7b9155cb34ad2e0fc90946b7e71f041b642122fb6314f660688/tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:55046ad3dd5f2b3c67501fcc8c9cbe3e901d8355f08a3b745e9b57894855f85b", size = 8989514 },
-    { url = "https://files.pythonhosted.org/packages/e8/bd/48475818e614b73316baf37ac1e4e51b578bbdf58651812d7e55f43b88d8/tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:efcf0eb939988b627558aaf2b9dc3e56d759cad2e0cfa04fcab378e4b48fc4fd", size = 9303476 },
-    { url = "https://files.pythonhosted.org/packages/ce/32/37ff2ced2c169c2e7586fcd51314f59d02c60fd2eeafea527c2f9d1bb512/tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a292392f24ab9abac5cfa8197e5a6208f2e43723420217e1ceba0b4ec77816ac", size = 2897613 },
-    { url = "https://files.pythonhosted.org/packages/79/e4/fdd7ad2aedaa4a3f148aa28670bf0b0856211a3fec3e6554ed6ceec9a928/tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dcd91f4e60f62b20d83a87a84fe062035a1e3ff49a8c2bbdeb2d441c8e311f4", size = 3085434 },
-    { url = "https://files.pythonhosted.org/packages/e0/b8/479ab7349faf1da001b861ea521055ad18a34a9b1053079e0c9b5c476f50/tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:900991a2b8ee35961b1095db7e265342e0e42a84c1a594823d5ee9f8fb791958", size = 2998651 },
-    { url = "https://files.pythonhosted.org/packages/6b/7f/3a1d5ded5f841764d67aa4c6e2e4b40d9dac5fbd2df135bccc58284a6917/tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5a8d8261ca2133d4f98aa9627c748189502b3787537ba3d7e2beb4f7cfc5d627", size = 8989010 },
-    { url = "https://files.pythonhosted.org/packages/2b/a7/e0b5d5fea8cb69afdbab3c0e0cc3a02b5dd888ce0f933312f7c0ca6b017e/tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c4fd4d71e6deb6ddf99d8d0eab87d1d16f635898906e631914a9bae8ae9f2cfb", size = 9303287 },
+sdist = { url = "https://files.pythonhosted.org/packages/20/41/c2be10975ca37f6ec40d7abd7e98a5213bb04f284b869c1a24e6504fd94d/tokenizers-0.21.0.tar.gz", hash = "sha256:ee0894bf311b75b0c03079f33859ae4b2334d675d4e93f5a4132e1eae2834fe4", size = 343021 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/f7/14/83429177c19364df27d22bc096d4c2e431e0ba43e56c525434f1f9b0fd00/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b177fb54c4702ef611de0c069d9169f0004233890e0c4c5bd5508ae05abf193", size = 2903304 },
+    { url = "https://files.pythonhosted.org/packages/7e/db/3433eab42347e0dc5452d8fcc8da03f638c9accffefe5a7c78146666964a/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b43779a269f4629bebb114e19c3fca0223296ae9fea8bb9a7a6c6fb0657ff8e", size = 2804378 },
+    { url = "https://files.pythonhosted.org/packages/57/8b/7da5e6f89736c2ade02816b4733983fca1c226b0c42980b1ae9dc8fcf5cc/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9aeb255802be90acfd363626753fda0064a8df06031012fe7d52fd9a905eb00e", size = 3095488 },
+    { url = "https://files.pythonhosted.org/packages/4d/f6/5ed6711093dc2c04a4e03f6461798b12669bc5a17c8be7cce1240e0b5ce8/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8b09dbeb7a8d73ee204a70f94fc06ea0f17dcf0844f16102b9f414f0b7463ba", size = 3121410 },
+    { url = "https://files.pythonhosted.org/packages/81/42/07600892d48950c5e80505b81411044a2d969368cdc0d929b1c847bf6697/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:400832c0904f77ce87c40f1a8a27493071282f785724ae62144324f171377273", size = 3388821 },
+    { url = "https://files.pythonhosted.org/packages/22/06/69d7ce374747edaf1695a4f61b83570d91cc8bbfc51ccfecf76f56ab4aac/tokenizers-0.21.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84ca973b3a96894d1707e189c14a774b701596d579ffc7e69debfc036a61a04", size = 3008868 },
+    { url = "https://files.pythonhosted.org/packages/c8/69/54a0aee4d576045b49a0eb8bffdc495634309c823bf886042e6f46b80058/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:eb7202d231b273c34ec67767378cd04c767e967fda12d4a9e36208a34e2f137e", size = 8975831 },
+    { url = "https://files.pythonhosted.org/packages/f7/f3/b776061e4f3ebf2905ba1a25d90380aafd10c02d406437a8ba22d1724d76/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:089d56db6782a73a27fd8abf3ba21779f5b85d4a9f35e3b493c7bbcbbf0d539b", size = 8920746 },
+    { url = "https://files.pythonhosted.org/packages/d8/ee/ce83d5ec8b6844ad4c3ecfe3333d58ecc1adc61f0878b323a15355bcab24/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:c87ca3dc48b9b1222d984b6b7490355a6fdb411a2d810f6f05977258400ddb74", size = 9161814 },
+    { url = "https://files.pythonhosted.org/packages/18/07/3e88e65c0ed28fa93aa0c4d264988428eef3df2764c3126dc83e243cb36f/tokenizers-0.21.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4145505a973116f91bc3ac45988a92e618a6f83eb458f49ea0790df94ee243ff", size = 9357138 },
 ]

 [[package]]
@@ -1869,7 +1837,7 @@ wheels = [

 [[package]]
 name = "transformers"
-version = "4.
+version = "4.48.3"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "filelock", marker = "sys_platform == 'linux'" },
@@ -1884,9 +1852,9 @@ dependencies = [
     { name = "tokenizers", marker = "sys_platform == 'linux'" },
     { name = "tqdm", marker = "sys_platform == 'linux'" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/
+sdist = { url = "https://files.pythonhosted.org/packages/e3/82/cebeb7af5e64440f1638f18c4ed0f89156d0eeaa6290d98da8ca93ac3872/transformers-4.48.3.tar.gz", hash = "sha256:a5e8f1e9a6430aa78215836be70cecd3f872d99eeda300f41ad6cc841724afdb", size = 8373458 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/
+    { url = "https://files.pythonhosted.org/packages/b6/1a/efeecb8d83705f2f4beac98d46f2148c95ecd7babfb31b5c0f1e7017e83d/transformers-4.48.3-py3-none-any.whl", hash = "sha256:78697f990f5ef350c23b46bf86d5081ce96b49479ab180b2de7687267de8fd36", size = 9669412 },
 ]

 [[package]]
optimum_rbln-0.2.1a1/src/optimum/rbln/transformers/models/bert/__init__.py
DELETED
@@ -1,15 +0,0 @@
-# Copyright 2025 Rebellions Inc. All rights reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-
-#     http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .modeling_bert import RBLNBertForQuestionAnswering, RBLNBertModel