optimum-rbln 0.9.3.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry has flagged this version of optimum-rbln for review.
- optimum/rbln/__init__.py +505 -0
- optimum/rbln/__version__.py +34 -0
- optimum/rbln/cli.py +660 -0
- optimum/rbln/configuration_utils.py +968 -0
- optimum/rbln/diffusers/__init__.py +198 -0
- optimum/rbln/diffusers/configurations/__init__.py +37 -0
- optimum/rbln/diffusers/configurations/models/__init__.py +10 -0
- optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl.py +73 -0
- optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl_cosmos.py +84 -0
- optimum/rbln/diffusers/configurations/models/configuration_autoencoder_kl_temporal_decoder.py +67 -0
- optimum/rbln/diffusers/configurations/models/configuration_controlnet.py +64 -0
- optimum/rbln/diffusers/configurations/models/configuration_prior_transformer.py +59 -0
- optimum/rbln/diffusers/configurations/models/configuration_transformer_cosmos.py +78 -0
- optimum/rbln/diffusers/configurations/models/configuration_transformer_sd3.py +63 -0
- optimum/rbln/diffusers/configurations/models/configuration_unet_2d_condition.py +81 -0
- optimum/rbln/diffusers/configurations/models/configuration_unet_spatio_temporal_condition.py +59 -0
- optimum/rbln/diffusers/configurations/models/configuration_vq_model.py +74 -0
- optimum/rbln/diffusers/configurations/pipelines/__init__.py +34 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_controlnet.py +316 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_cosmos.py +117 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_kandinsky2_2.py +363 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion.py +156 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_3.py +176 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_diffusion_xl.py +159 -0
- optimum/rbln/diffusers/configurations/pipelines/configuration_stable_video_diffusion.py +114 -0
- optimum/rbln/diffusers/modeling_diffusers.py +451 -0
- optimum/rbln/diffusers/models/__init__.py +64 -0
- optimum/rbln/diffusers/models/autoencoders/__init__.py +18 -0
- optimum/rbln/diffusers/models/autoencoders/autoencoder_kl.py +255 -0
- optimum/rbln/diffusers/models/autoencoders/autoencoder_kl_cosmos.py +245 -0
- optimum/rbln/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py +275 -0
- optimum/rbln/diffusers/models/autoencoders/vae.py +178 -0
- optimum/rbln/diffusers/models/autoencoders/vq_model.py +211 -0
- optimum/rbln/diffusers/models/controlnet.py +281 -0
- optimum/rbln/diffusers/models/transformers/__init__.py +17 -0
- optimum/rbln/diffusers/models/transformers/prior_transformer.py +160 -0
- optimum/rbln/diffusers/models/transformers/transformer_cosmos.py +344 -0
- optimum/rbln/diffusers/models/transformers/transformer_sd3.py +191 -0
- optimum/rbln/diffusers/models/unets/__init__.py +16 -0
- optimum/rbln/diffusers/models/unets/unet_2d_condition.py +408 -0
- optimum/rbln/diffusers/models/unets/unet_spatio_temporal_condition.py +201 -0
- optimum/rbln/diffusers/pipelines/__init__.py +113 -0
- optimum/rbln/diffusers/pipelines/auto_pipeline.py +307 -0
- optimum/rbln/diffusers/pipelines/controlnet/__init__.py +19 -0
- optimum/rbln/diffusers/pipelines/controlnet/multicontrolnet.py +139 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet.py +669 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_img2img.py +640 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl.py +825 -0
- optimum/rbln/diffusers/pipelines/controlnet/pipeline_controlnet_sd_xl_img2img.py +837 -0
- optimum/rbln/diffusers/pipelines/cosmos/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/cosmos/configuration_cosmos_guardrail.py +113 -0
- optimum/rbln/diffusers/pipelines/cosmos/cosmos_guardrail.py +425 -0
- optimum/rbln/diffusers/pipelines/cosmos/pipeline_cosmos_text2world.py +128 -0
- optimum/rbln/diffusers/pipelines/cosmos/pipeline_cosmos_video2world.py +128 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/__init__.py +23 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +34 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py +207 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +34 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_inpaint.py +34 -0
- optimum/rbln/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +32 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_img2img.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3_inpaint.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/__init__.py +17 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py +31 -0
- optimum/rbln/diffusers/pipelines/stable_video_diffusion/__init__.py +15 -0
- optimum/rbln/diffusers/pipelines/stable_video_diffusion/pipeline_stable_video_diffusion.py +46 -0
- optimum/rbln/modeling.py +364 -0
- optimum/rbln/modeling_base.py +637 -0
- optimum/rbln/ops/__init__.py +19 -0
- optimum/rbln/ops/attn.py +455 -0
- optimum/rbln/ops/flash_attn.py +350 -0
- optimum/rbln/ops/kv_cache_update.py +29 -0
- optimum/rbln/ops/linear.py +32 -0
- optimum/rbln/ops/sliding_window_attn.py +111 -0
- optimum/rbln/transformers/__init__.py +340 -0
- optimum/rbln/transformers/configuration_generic.py +120 -0
- optimum/rbln/transformers/modeling_attention_utils.py +385 -0
- optimum/rbln/transformers/modeling_generic.py +280 -0
- optimum/rbln/transformers/modeling_outputs.py +37 -0
- optimum/rbln/transformers/modeling_rope_utils.py +314 -0
- optimum/rbln/transformers/models/__init__.py +343 -0
- optimum/rbln/transformers/models/audio_spectrogram_transformer/__init__.py +17 -0
- optimum/rbln/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +47 -0
- optimum/rbln/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +91 -0
- optimum/rbln/transformers/models/auto/__init__.py +31 -0
- optimum/rbln/transformers/models/auto/auto_factory.py +267 -0
- optimum/rbln/transformers/models/auto/modeling_auto.py +162 -0
- optimum/rbln/transformers/models/bart/__init__.py +17 -0
- optimum/rbln/transformers/models/bart/bart_architecture.py +163 -0
- optimum/rbln/transformers/models/bart/configuration_bart.py +36 -0
- optimum/rbln/transformers/models/bart/modeling_bart.py +86 -0
- optimum/rbln/transformers/models/bert/__init__.py +16 -0
- optimum/rbln/transformers/models/bert/bert_architecture.py +16 -0
- optimum/rbln/transformers/models/bert/configuration_bert.py +46 -0
- optimum/rbln/transformers/models/bert/modeling_bert.py +148 -0
- optimum/rbln/transformers/models/blip_2/__init__.py +20 -0
- optimum/rbln/transformers/models/blip_2/configuration_blip_2.py +115 -0
- optimum/rbln/transformers/models/blip_2/modeling_blip_2.py +526 -0
- optimum/rbln/transformers/models/clip/__init__.py +26 -0
- optimum/rbln/transformers/models/clip/configuration_clip.py +103 -0
- optimum/rbln/transformers/models/clip/modeling_clip.py +384 -0
- optimum/rbln/transformers/models/colpali/__init__.py +2 -0
- optimum/rbln/transformers/models/colpali/colpali_architecture.py +218 -0
- optimum/rbln/transformers/models/colpali/configuration_colpali.py +84 -0
- optimum/rbln/transformers/models/colpali/modeling_colpali.py +361 -0
- optimum/rbln/transformers/models/colqwen2/__init__.py +2 -0
- optimum/rbln/transformers/models/colqwen2/colqwen2_architecture.py +233 -0
- optimum/rbln/transformers/models/colqwen2/configuration_colqwen2.py +74 -0
- optimum/rbln/transformers/models/colqwen2/modeling_colqwen2.py +446 -0
- optimum/rbln/transformers/models/decoderonly/__init__.py +27 -0
- optimum/rbln/transformers/models/decoderonly/configuration_decoderonly.py +300 -0
- optimum/rbln/transformers/models/decoderonly/configuration_lora.py +411 -0
- optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py +1224 -0
- optimum/rbln/transformers/models/decoderonly/decoderonly_runtime_utils.py +508 -0
- optimum/rbln/transformers/models/decoderonly/generation_decoderonly.py +119 -0
- optimum/rbln/transformers/models/decoderonly/lora_architecture.py +204 -0
- optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py +823 -0
- optimum/rbln/transformers/models/depth_anything/__init__.py +16 -0
- optimum/rbln/transformers/models/depth_anything/configuration_depth_anything.py +24 -0
- optimum/rbln/transformers/models/depth_anything/modeling_depth_anything.py +42 -0
- optimum/rbln/transformers/models/distilbert/__init__.py +19 -0
- optimum/rbln/transformers/models/distilbert/configuration_distilbert.py +24 -0
- optimum/rbln/transformers/models/distilbert/modeling_distilbert.py +51 -0
- optimum/rbln/transformers/models/dpt/__init__.py +16 -0
- optimum/rbln/transformers/models/dpt/configuration_dpt.py +24 -0
- optimum/rbln/transformers/models/dpt/modeling_dpt.py +42 -0
- optimum/rbln/transformers/models/exaone/__init__.py +24 -0
- optimum/rbln/transformers/models/exaone/configuration_exaone.py +42 -0
- optimum/rbln/transformers/models/exaone/exaone_architecture.py +77 -0
- optimum/rbln/transformers/models/exaone/modeling_exaone.py +145 -0
- optimum/rbln/transformers/models/gemma/__init__.py +16 -0
- optimum/rbln/transformers/models/gemma/configuration_gemma.py +50 -0
- optimum/rbln/transformers/models/gemma/gemma_architecture.py +27 -0
- optimum/rbln/transformers/models/gemma/modeling_gemma.py +104 -0
- optimum/rbln/transformers/models/gemma3/__init__.py +16 -0
- optimum/rbln/transformers/models/gemma3/configuration_gemma3.py +109 -0
- optimum/rbln/transformers/models/gemma3/gemma3_architecture.py +170 -0
- optimum/rbln/transformers/models/gemma3/gemma3_runtime_utils.py +245 -0
- optimum/rbln/transformers/models/gemma3/modeling_gemma3.py +611 -0
- optimum/rbln/transformers/models/gpt2/__init__.py +16 -0
- optimum/rbln/transformers/models/gpt2/configuration_gpt2.py +50 -0
- optimum/rbln/transformers/models/gpt2/gpt2_architecture.py +93 -0
- optimum/rbln/transformers/models/gpt2/modeling_gpt2.py +55 -0
- optimum/rbln/transformers/models/grounding_dino/__init__.py +10 -0
- optimum/rbln/transformers/models/grounding_dino/configuration_grounding_dino.py +92 -0
- optimum/rbln/transformers/models/grounding_dino/grounding_dino_architecture.py +599 -0
- optimum/rbln/transformers/models/grounding_dino/modeling_grounding_dino.py +1048 -0
- optimum/rbln/transformers/models/idefics3/__init__.py +16 -0
- optimum/rbln/transformers/models/idefics3/configuration_idefics3.py +89 -0
- optimum/rbln/transformers/models/idefics3/modeling_idefics3.py +497 -0
- optimum/rbln/transformers/models/llama/__init__.py +16 -0
- optimum/rbln/transformers/models/llama/configuration_llama.py +50 -0
- optimum/rbln/transformers/models/llama/llama_architecture.py +19 -0
- optimum/rbln/transformers/models/llama/modeling_llama.py +104 -0
- optimum/rbln/transformers/models/llava/__init__.py +16 -0
- optimum/rbln/transformers/models/llava/configuration_llava.py +72 -0
- optimum/rbln/transformers/models/llava/modeling_llava.py +490 -0
- optimum/rbln/transformers/models/llava_next/__init__.py +16 -0
- optimum/rbln/transformers/models/llava_next/configuration_llava_next.py +69 -0
- optimum/rbln/transformers/models/llava_next/modeling_llava_next.py +493 -0
- optimum/rbln/transformers/models/midm/__init__.py +24 -0
- optimum/rbln/transformers/models/midm/configuration_midm.py +42 -0
- optimum/rbln/transformers/models/midm/midm_architecture.py +144 -0
- optimum/rbln/transformers/models/midm/modeling_midm.py +144 -0
- optimum/rbln/transformers/models/mistral/__init__.py +16 -0
- optimum/rbln/transformers/models/mistral/configuration_mistral.py +50 -0
- optimum/rbln/transformers/models/mistral/mistral_architecture.py +19 -0
- optimum/rbln/transformers/models/mistral/modeling_mistral.py +115 -0
- optimum/rbln/transformers/models/opt/__init__.py +16 -0
- optimum/rbln/transformers/models/opt/configuration_opt.py +29 -0
- optimum/rbln/transformers/models/opt/modeling_opt.py +102 -0
- optimum/rbln/transformers/models/opt/opt_architecture.py +74 -0
- optimum/rbln/transformers/models/pegasus/__init__.py +17 -0
- optimum/rbln/transformers/models/pegasus/configuration_pegasus.py +38 -0
- optimum/rbln/transformers/models/pegasus/modeling_pegasus.py +71 -0
- optimum/rbln/transformers/models/pegasus/pegasus_architecture.py +161 -0
- optimum/rbln/transformers/models/phi/__init__.py +16 -0
- optimum/rbln/transformers/models/phi/configuration_phi.py +50 -0
- optimum/rbln/transformers/models/phi/modeling_phi.py +92 -0
- optimum/rbln/transformers/models/phi/phi_architecture.py +115 -0
- optimum/rbln/transformers/models/pixtral/__init__.py +16 -0
- optimum/rbln/transformers/models/pixtral/configuration_pixtral.py +43 -0
- optimum/rbln/transformers/models/pixtral/modeling_pixtral.py +322 -0
- optimum/rbln/transformers/models/pixtral/pixtral_architecture.py +73 -0
- optimum/rbln/transformers/models/qwen2/__init__.py +16 -0
- optimum/rbln/transformers/models/qwen2/configuration_qwen2.py +50 -0
- optimum/rbln/transformers/models/qwen2/modeling_qwen2.py +123 -0
- optimum/rbln/transformers/models/qwen2/qwen2_architecture.py +19 -0
- optimum/rbln/transformers/models/qwen2_5_vl/__init__.py +19 -0
- optimum/rbln/transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +111 -0
- optimum/rbln/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +636 -0
- optimum/rbln/transformers/models/qwen2_5_vl/qwen2_5_vl_architecture.py +220 -0
- optimum/rbln/transformers/models/qwen2_vl/__init__.py +19 -0
- optimum/rbln/transformers/models/qwen2_vl/configuration_qwen2_vl.py +88 -0
- optimum/rbln/transformers/models/qwen2_vl/modeling_qwen2_vl.py +513 -0
- optimum/rbln/transformers/models/qwen2_vl/qwen2_vl_architecture.py +165 -0
- optimum/rbln/transformers/models/qwen3/__init__.py +16 -0
- optimum/rbln/transformers/models/qwen3/configuration_qwen3.py +71 -0
- optimum/rbln/transformers/models/qwen3/modeling_qwen3.py +133 -0
- optimum/rbln/transformers/models/qwen3/qwen3_architecture.py +31 -0
- optimum/rbln/transformers/models/resnet/__init__.py +23 -0
- optimum/rbln/transformers/models/resnet/configuration_resnet.py +42 -0
- optimum/rbln/transformers/models/resnet/modeling_resnet.py +99 -0
- optimum/rbln/transformers/models/roberta/__init__.py +24 -0
- optimum/rbln/transformers/models/roberta/configuration_roberta.py +33 -0
- optimum/rbln/transformers/models/roberta/modeling_roberta.py +72 -0
- optimum/rbln/transformers/models/seq2seq/__init__.py +16 -0
- optimum/rbln/transformers/models/seq2seq/configuration_seq2seq.py +71 -0
- optimum/rbln/transformers/models/seq2seq/modeling_seq2seq.py +477 -0
- optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py +527 -0
- optimum/rbln/transformers/models/siglip/__init__.py +16 -0
- optimum/rbln/transformers/models/siglip/configuration_siglip.py +76 -0
- optimum/rbln/transformers/models/siglip/modeling_siglip.py +199 -0
- optimum/rbln/transformers/models/swin/__init__.py +16 -0
- optimum/rbln/transformers/models/swin/configuration_swin.py +42 -0
- optimum/rbln/transformers/models/swin/modeling_swin.py +354 -0
- optimum/rbln/transformers/models/t5/__init__.py +17 -0
- optimum/rbln/transformers/models/t5/configuration_t5.py +36 -0
- optimum/rbln/transformers/models/t5/modeling_t5.py +130 -0
- optimum/rbln/transformers/models/t5/t5_architecture.py +264 -0
- optimum/rbln/transformers/models/time_series_transformer/__init__.py +26 -0
- optimum/rbln/transformers/models/time_series_transformer/configuration_time_series_transformer.py +41 -0
- optimum/rbln/transformers/models/time_series_transformer/modeling_time_series_transformer.py +435 -0
- optimum/rbln/transformers/models/time_series_transformer/time_series_transformers_architecture.py +337 -0
- optimum/rbln/transformers/models/vit/__init__.py +19 -0
- optimum/rbln/transformers/models/vit/configuration_vit.py +24 -0
- optimum/rbln/transformers/models/vit/modeling_vit.py +44 -0
- optimum/rbln/transformers/models/wav2vec2/__init__.py +16 -0
- optimum/rbln/transformers/models/wav2vec2/configuration_wav2vec2.py +38 -0
- optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py +104 -0
- optimum/rbln/transformers/models/whisper/__init__.py +17 -0
- optimum/rbln/transformers/models/whisper/configuration_whisper.py +72 -0
- optimum/rbln/transformers/models/whisper/generation_whisper.py +159 -0
- optimum/rbln/transformers/models/whisper/modeling_whisper.py +475 -0
- optimum/rbln/transformers/models/whisper/whisper_architecture.py +349 -0
- optimum/rbln/transformers/models/xlm_roberta/__init__.py +24 -0
- optimum/rbln/transformers/models/xlm_roberta/configuration_xlm_roberta.py +32 -0
- optimum/rbln/transformers/models/xlm_roberta/modeling_xlm_roberta.py +82 -0
- optimum/rbln/transformers/utils/__init__.py +0 -0
- optimum/rbln/transformers/utils/rbln_quantization.py +589 -0
- optimum/rbln/transformers/utils/rbln_runtime_wrapper.py +79 -0
- optimum/rbln/utils/__init__.py +16 -0
- optimum/rbln/utils/decorator_utils.py +86 -0
- optimum/rbln/utils/deprecation.py +213 -0
- optimum/rbln/utils/hub.py +94 -0
- optimum/rbln/utils/import_utils.py +170 -0
- optimum/rbln/utils/logging.py +110 -0
- optimum/rbln/utils/model_utils.py +63 -0
- optimum/rbln/utils/runtime_utils.py +249 -0
- optimum/rbln/utils/save_utils.py +102 -0
- optimum/rbln/utils/submodule.py +152 -0
- optimum_rbln-0.9.3.post1.dist-info/METADATA +124 -0
- optimum_rbln-0.9.3.post1.dist-info/RECORD +264 -0
- optimum_rbln-0.9.3.post1.dist-info/WHEEL +4 -0
- optimum_rbln-0.9.3.post1.dist-info/entry_points.txt +2 -0
- optimum_rbln-0.9.3.post1.dist-info/licenses/LICENSE +201 -0
optimum/rbln/transformers/models/__init__.py
@@ -0,0 +1,343 @@
+# Copyright 2025 Rebellions Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from transformers.utils import _LazyModule
+
+
+_import_structure = {
+    "audio_spectrogram_transformer": [
+        "RBLNASTForAudioClassification",
+        "RBLNASTForAudioClassificationConfig",
+    ],
+    "auto": [
+        "RBLNAutoModel",
+        "RBLNAutoModelForAudioClassification",
+        "RBLNAutoModelForCausalLM",
+        "RBLNAutoModelForCTC",
+        "RBLNAutoModelForDepthEstimation",
+        "RBLNAutoModelForImageClassification",
+        "RBLNAutoModelForMaskedLM",
+        "RBLNAutoModelForQuestionAnswering",
+        "RBLNAutoModelForSeq2SeqLM",
+        "RBLNAutoModelForSequenceClassification",
+        "RBLNAutoModelForSpeechSeq2Seq",
+        "RBLNAutoModelForVision2Seq",
+        "RBLNAutoModelForImageTextToText",
+        "RBLNAutoModelForTextEncoding",
+        "RBLNAutoModelForZeroShotObjectDetection",
+    ],
+    "bart": [
+        "RBLNBartForConditionalGeneration",
+        "RBLNBartModel",
+        "RBLNBartForConditionalGenerationConfig",
+        "RBLNBartModelConfig",
+    ],
+    "bert": [
+        "RBLNBertModel",
+        "RBLNBertModelConfig",
+        "RBLNBertForQuestionAnswering",
+        "RBLNBertForQuestionAnsweringConfig",
+        "RBLNBertForMaskedLM",
+        "RBLNBertForMaskedLMConfig",
+    ],
+    "blip_2": [
+        "RBLNBlip2VisionModelConfig",
+        "RBLNBlip2VisionModel",
+        "RBLNBlip2ForConditionalGeneration",
+        "RBLNBlip2ForConditionalGenerationConfig",
+        "RBLNBlip2QFormerModel",
+        "RBLNBlip2QFormerModelConfig",
+    ],
+    "clip": [
+        "RBLNCLIPTextModel",
+        "RBLNCLIPTextModelConfig",
+        "RBLNCLIPTextModelWithProjection",
+        "RBLNCLIPTextModelWithProjectionConfig",
+        "RBLNCLIPVisionModel",
+        "RBLNCLIPVisionModelConfig",
+        "RBLNCLIPVisionModelWithProjection",
+        "RBLNCLIPVisionModelWithProjectionConfig",
+    ],
+    "colpali": [
+        "RBLNColPaliForRetrieval",
+        "RBLNColPaliForRetrievalConfig",
+    ],
+    "colqwen2": [
+        "RBLNColQwen2ForRetrieval",
+        "RBLNColQwen2ForRetrievalConfig",
+    ],
+    "distilbert": [
+        "RBLNDistilBertForQuestionAnswering",
+        "RBLNDistilBertForQuestionAnsweringConfig",
+    ],
+    "qwen2_5_vl": [
+        "RBLNQwen2_5_VisionTransformerPretrainedModel",
+        "RBLNQwen2_5_VisionTransformerPretrainedModelConfig",
+        "RBLNQwen2_5_VLForConditionalGeneration",
+        "RBLNQwen2_5_VLForConditionalGenerationConfig",
+    ],
+    "qwen2_vl": [
+        "RBLNQwen2VisionTransformerPretrainedModel",
+        "RBLNQwen2VisionTransformerPretrainedModelConfig",
+        "RBLNQwen2VLForConditionalGeneration",
+        "RBLNQwen2VLForConditionalGenerationConfig",
+    ],
+    "decoderonly": [
+        "RBLNDecoderOnlyModelConfig",
+        "RBLNDecoderOnlyModel",
+        "RBLNDecoderOnlyModelForCausalLM",
+        "RBLNDecoderOnlyModelForCausalLMConfig",
+        "RBLNLoRAAdapterConfig",
+        "RBLNLoRAConfig",
+    ],
+    "depth_anything": ["RBLNDepthAnythingForDepthEstimationConfig", "RBLNDepthAnythingForDepthEstimation"],
+    "dpt": [
+        "RBLNDPTForDepthEstimation",
+        "RBLNDPTForDepthEstimationConfig",
+    ],
+    "exaone": ["RBLNExaoneForCausalLM", "RBLNExaoneForCausalLMConfig"],
+    "gemma": ["RBLNGemmaForCausalLM", "RBLNGemmaForCausalLMConfig", "RBLNGemmaModel", "RBLNGemmaModelConfig"],
+    "gemma3": [
+        "RBLNGemma3ForCausalLM",
+        "RBLNGemma3ForCausalLMConfig",
+        "RBLNGemma3ForConditionalGeneration",
+        "RBLNGemma3ForConditionalGenerationConfig",
+    ],
+    "gpt2": ["RBLNGPT2LMHeadModel", "RBLNGPT2LMHeadModelConfig", "RBLNGPT2Model", "RBLNGPT2ModelConfig"],
+    "idefics3": [
+        "RBLNIdefics3VisionTransformer",
+        "RBLNIdefics3ForConditionalGeneration",
+        "RBLNIdefics3ForConditionalGenerationConfig",
+        "RBLNIdefics3VisionTransformerConfig",
+    ],
+    "llava": ["RBLNLlavaForConditionalGeneration", "RBLNLlavaForConditionalGenerationConfig"],
+    "llama": ["RBLNLlamaForCausalLM", "RBLNLlamaForCausalLMConfig", "RBLNLlamaModel", "RBLNLlamaModelConfig"],
+    "opt": ["RBLNOPTForCausalLM", "RBLNOPTForCausalLMConfig", "RBLNOPTModel", "RBLNOPTModelConfig"],
+    "pegasus": [
+        "RBLNPegasusForConditionalGeneration",
+        "RBLNPegasusModel",
+        "RBLNPegasusForConditionalGenerationConfig",
+        "RBLNPegasusModelConfig",
+    ],
+    "llava_next": ["RBLNLlavaNextForConditionalGeneration", "RBLNLlavaNextForConditionalGenerationConfig"],
+    "midm": ["RBLNMidmLMHeadModel", "RBLNMidmLMHeadModelConfig"],
+    "pixtral": ["RBLNPixtralVisionModel", "RBLNPixtralVisionModelConfig"],
+    "mistral": [
+        "RBLNMistralForCausalLM",
+        "RBLNMistralForCausalLMConfig",
+        "RBLNMistralModel",
+        "RBLNMistralModelConfig",
+    ],
+    "phi": ["RBLNPhiForCausalLM", "RBLNPhiForCausalLMConfig", "RBLNPhiModel", "RBLNPhiModelConfig"],
+    "qwen2": ["RBLNQwen2ForCausalLM", "RBLNQwen2ForCausalLMConfig", "RBLNQwen2Model", "RBLNQwen2ModelConfig"],
+    "qwen3": ["RBLNQwen3ForCausalLM", "RBLNQwen3ForCausalLMConfig", "RBLNQwen3Model", "RBLNQwen3ModelConfig"],
+    "resnet": ["RBLNResNetForImageClassification", "RBLNResNetForImageClassificationConfig"],
+    "roberta": [
+        "RBLNRobertaForMaskedLM",
+        "RBLNRobertaForMaskedLMConfig",
+        "RBLNRobertaForSequenceClassification",
+        "RBLNRobertaForSequenceClassificationConfig",
+    ],
+    "siglip": [
+        "RBLNSiglipVisionModel",
+        "RBLNSiglipVisionModelConfig",
+    ],
+    "swin": [
+        "RBLNSwinBackbone",
+        "RBLNSwinBackboneConfig",
+    ],
+    "time_series_transformer": [
+        "RBLNTimeSeriesTransformerForPrediction",
+        "RBLNTimeSeriesTransformerForPredictionConfig",
+    ],
+    "t5": [
+        "RBLNT5EncoderModel",
+        "RBLNT5ForConditionalGeneration",
+        "RBLNT5EncoderModelConfig",
+        "RBLNT5ForConditionalGenerationConfig",
+    ],
+    "vit": ["RBLNViTForImageClassification", "RBLNViTForImageClassificationConfig"],
+    "wav2vec2": ["RBLNWav2Vec2ForCTC", "RBLNWav2Vec2ForCTCConfig"],
+    "whisper": ["RBLNWhisperForConditionalGeneration", "RBLNWhisperForConditionalGenerationConfig"],
+    "xlm_roberta": [
+        "RBLNXLMRobertaModel",
+        "RBLNXLMRobertaModelConfig",
+        "RBLNXLMRobertaForSequenceClassification",
+        "RBLNXLMRobertaForSequenceClassificationConfig",
+    ],
+    "grounding_dino": [
+        "RBLNGroundingDinoForObjectDetection",
+        "RBLNGroundingDinoForObjectDetectionConfig",
+        "RBLNGroundingDinoEncoder",
+        "RBLNGroundingDinoEncoderConfig",
+        "RBLNGroundingDinoDecoder",
+        "RBLNGroundingDinoDecoderConfig",
+    ],
+}
+
+if TYPE_CHECKING:
+    from .audio_spectrogram_transformer import RBLNASTForAudioClassification, RBLNASTForAudioClassificationConfig
+    from .auto import (
+        RBLNAutoModel,
+        RBLNAutoModelForAudioClassification,
+        RBLNAutoModelForCausalLM,
+        RBLNAutoModelForCTC,
+        RBLNAutoModelForDepthEstimation,
+        RBLNAutoModelForImageClassification,
+        RBLNAutoModelForImageTextToText,
+        RBLNAutoModelForMaskedLM,
+        RBLNAutoModelForQuestionAnswering,
+        RBLNAutoModelForSeq2SeqLM,
+        RBLNAutoModelForSequenceClassification,
+        RBLNAutoModelForSpeechSeq2Seq,
+        RBLNAutoModelForTextEncoding,
+        RBLNAutoModelForVision2Seq,
+        RBLNAutoModelForZeroShotObjectDetection,
+    )
+    from .bart import (
+        RBLNBartForConditionalGeneration,
+        RBLNBartForConditionalGenerationConfig,
+        RBLNBartModel,
+        RBLNBartModelConfig,
+    )
+    from .bert import (
+        RBLNBertForMaskedLM,
+        RBLNBertForMaskedLMConfig,
+        RBLNBertForQuestionAnswering,
+        RBLNBertForQuestionAnsweringConfig,
+        RBLNBertModel,
+        RBLNBertModelConfig,
+    )
+    from .blip_2 import (
+        RBLNBlip2ForConditionalGeneration,
+        RBLNBlip2ForConditionalGenerationConfig,
+        RBLNBlip2QFormerModel,
+        RBLNBlip2QFormerModelConfig,
+        RBLNBlip2VisionModel,
+        RBLNBlip2VisionModelConfig,
+    )
+    from .clip import (
+        RBLNCLIPTextModel,
+        RBLNCLIPTextModelConfig,
+        RBLNCLIPTextModelWithProjection,
+        RBLNCLIPTextModelWithProjectionConfig,
+        RBLNCLIPVisionModel,
+        RBLNCLIPVisionModelConfig,
+        RBLNCLIPVisionModelWithProjection,
+        RBLNCLIPVisionModelWithProjectionConfig,
+    )
+    from .colpali import RBLNColPaliForRetrieval, RBLNColPaliForRetrievalConfig
+    from .colqwen2 import RBLNColQwen2ForRetrieval, RBLNColQwen2ForRetrievalConfig
+    from .decoderonly import (
+        RBLNDecoderOnlyModel,
+        RBLNDecoderOnlyModelConfig,
+        RBLNDecoderOnlyModelForCausalLM,
+        RBLNDecoderOnlyModelForCausalLMConfig,
+        RBLNLoRAAdapterConfig,
+        RBLNLoRAConfig,
+    )
+    from .depth_anything import RBLNDepthAnythingForDepthEstimation, RBLNDepthAnythingForDepthEstimationConfig
+    from .distilbert import RBLNDistilBertForQuestionAnswering, RBLNDistilBertForQuestionAnsweringConfig
+    from .dpt import RBLNDPTForDepthEstimation, RBLNDPTForDepthEstimationConfig
+    from .exaone import RBLNExaoneForCausalLM, RBLNExaoneForCausalLMConfig
+    from .gemma import RBLNGemmaForCausalLM, RBLNGemmaForCausalLMConfig, RBLNGemmaModel, RBLNGemmaModelConfig
+    from .gemma3 import (
+        RBLNGemma3ForCausalLM,
+        RBLNGemma3ForCausalLMConfig,
+        RBLNGemma3ForConditionalGeneration,
+        RBLNGemma3ForConditionalGenerationConfig,
+    )
+    from .gpt2 import RBLNGPT2LMHeadModel, RBLNGPT2LMHeadModelConfig, RBLNGPT2Model, RBLNGPT2ModelConfig
+    from .grounding_dino import (
+        RBLNGroundingDinoDecoder,
+        RBLNGroundingDinoDecoderConfig,
+        RBLNGroundingDinoEncoder,
+        RBLNGroundingDinoEncoderConfig,
+        RBLNGroundingDinoForObjectDetection,
+        RBLNGroundingDinoForObjectDetectionConfig,
+    )
+    from .idefics3 import (
+        RBLNIdefics3ForConditionalGeneration,
+        RBLNIdefics3ForConditionalGenerationConfig,
+        RBLNIdefics3VisionTransformer,
+        RBLNIdefics3VisionTransformerConfig,
+    )
+    from .llama import RBLNLlamaForCausalLM, RBLNLlamaForCausalLMConfig, RBLNLlamaModel, RBLNLlamaModelConfig
+    from .llava import RBLNLlavaForConditionalGeneration, RBLNLlavaForConditionalGenerationConfig
+    from .llava_next import RBLNLlavaNextForConditionalGeneration, RBLNLlavaNextForConditionalGenerationConfig
+    from .midm import RBLNMidmLMHeadModel, RBLNMidmLMHeadModelConfig
+    from .mistral import RBLNMistralForCausalLM, RBLNMistralForCausalLMConfig, RBLNMistralModel, RBLNMistralModelConfig
+    from .opt import RBLNOPTForCausalLM, RBLNOPTForCausalLMConfig, RBLNOPTModel, RBLNOPTModelConfig
+    from .pegasus import (
+        RBLNPegasusForConditionalGeneration,
+        RBLNPegasusForConditionalGenerationConfig,
+        RBLNPegasusModel,
+        RBLNPegasusModelConfig,
+    )
+    from .phi import RBLNPhiForCausalLM, RBLNPhiForCausalLMConfig, RBLNPhiModel, RBLNPhiModelConfig
+    from .pixtral import RBLNPixtralVisionModel, RBLNPixtralVisionModelConfig
+    from .qwen2 import RBLNQwen2ForCausalLM, RBLNQwen2ForCausalLMConfig, RBLNQwen2Model, RBLNQwen2ModelConfig
+    from .qwen2_5_vl import (
+        RBLNQwen2_5_VisionTransformerPretrainedModel,
+        RBLNQwen2_5_VisionTransformerPretrainedModelConfig,
+        RBLNQwen2_5_VLForConditionalGeneration,
+        RBLNQwen2_5_VLForConditionalGenerationConfig,
+    )
+    from .qwen2_vl import (
+        RBLNQwen2VisionTransformerPretrainedModel,
+        RBLNQwen2VisionTransformerPretrainedModelConfig,
+        RBLNQwen2VLForConditionalGeneration,
+        RBLNQwen2VLForConditionalGenerationConfig,
+    )
+    from .qwen3 import RBLNQwen3ForCausalLM, RBLNQwen3ForCausalLMConfig, RBLNQwen3Model, RBLNQwen3ModelConfig
+    from .resnet import RBLNResNetForImageClassification, RBLNResNetForImageClassificationConfig
+    from .roberta import (
+        RBLNRobertaForMaskedLM,
+        RBLNRobertaForMaskedLMConfig,
+        RBLNRobertaForSequenceClassification,
+        RBLNRobertaForSequenceClassificationConfig,
+    )
+    from .siglip import RBLNSiglipVisionModel, RBLNSiglipVisionModelConfig
+    from .swin import RBLNSwinBackbone, RBLNSwinBackboneConfig
+    from .t5 import (
+        RBLNT5EncoderModel,
+        RBLNT5EncoderModelConfig,
+        RBLNT5ForConditionalGeneration,
+        RBLNT5ForConditionalGenerationConfig,
+    )
+    from .time_series_transformer import (
+        RBLNTimeSeriesTransformerForPrediction,
+        RBLNTimeSeriesTransformerForPredictionConfig,
+    )
+    from .vit import RBLNViTForImageClassification, RBLNViTForImageClassificationConfig
+    from .wav2vec2 import RBLNWav2Vec2ForCTC, RBLNWav2Vec2ForCTCConfig
+    from .whisper import RBLNWhisperForConditionalGeneration, RBLNWhisperForConditionalGenerationConfig
+    from .xlm_roberta import (
+        RBLNXLMRobertaForSequenceClassification,
+        RBLNXLMRobertaForSequenceClassificationConfig,
+        RBLNXLMRobertaModel,
+        RBLNXLMRobertaModelConfig,
+    )
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
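Annotation (not part of the diff): the `_import_structure` / `_LazyModule` wiring above means that importing one exported class pulls in only that class's submodule, which keeps `import optimum.rbln` cheap despite the large model catalog. A minimal sketch of that behavior, where the class names are real exports of this package and everything else is illustrative:

# Accessing one name triggers only its own submodule import.
import sys

from optimum.rbln.transformers.models import RBLNASTForAudioClassification

# Heavyweight siblings stay unloaded until first accessed.
assert "optimum.rbln.transformers.models.whisper" not in sys.modules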
optimum/rbln/transformers/models/audio_spectrogram_transformer/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2025 Rebellions Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .configuration_audio_spectrogram_transformer import RBLNASTForAudioClassificationConfig
+from .modeling_audio_spectrogram_transformer import RBLNASTForAudioClassification
optimum/rbln/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py
@@ -0,0 +1,47 @@
+# Copyright 2025 Rebellions Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Optional
+
+from ....configuration_utils import RBLNModelConfig
+from ....utils.deprecation import deprecate_kwarg
+
+
+class RBLNASTForAudioClassificationConfig(RBLNModelConfig):
+    """
+    Configuration class for RBLNASTForAudioClassification.
+    """
+
+    @deprecate_kwarg(old_name="num_mel_bins", version="0.10.0")
+    def __init__(
+        self,
+        batch_size: Optional[int] = None,
+        max_length: Optional[int] = None,
+        **kwargs: Any,
+    ):
+        """
+        Args:
+            batch_size (Optional[int]): The batch size for inference. Defaults to 1.
+            max_length (Optional[int]): Maximum length of the audio input in time dimension.
+            kwargs: Additional arguments passed to the parent RBLNModelConfig.
+
+        Raises:
+            ValueError: If batch_size is not a positive integer.
+        """
+        super().__init__(**kwargs)
+        self.batch_size = batch_size or 1
+        if not isinstance(self.batch_size, int) or self.batch_size < 0:
+            raise ValueError(f"batch_size must be a positive integer, got {self.batch_size}")
+
+        self.max_length = max_length
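Annotation (not part of the diff): a minimal construction sketch for the config class above. The class and parameter names come from the code; the argument values are illustrative, and it assumes the base `RBLNModelConfig` constructs with no extra kwargs.

from optimum.rbln.transformers.models import RBLNASTForAudioClassificationConfig

cfg = RBLNASTForAudioClassificationConfig(batch_size=2, max_length=1024)
assert cfg.batch_size == 2 and cfg.max_length == 1024

# batch_size defaults to 1 when omitted; note that a falsy 0 is also
# coerced to 1 by `batch_size or 1` before the validation runs.
assert RBLNASTForAudioClassificationConfig().batch_size == 1

# Negative or non-integer values are rejected by the guard.
try:
    RBLNASTForAudioClassificationConfig(batch_size=-1)
except ValueError as e:
    print(e)  # batch_size must be a positive integer, got -1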
optimum/rbln/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py
@@ -0,0 +1,91 @@
+# Copyright 2025 Rebellions Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING, Optional
+
+import torch
+from transformers import AutoModelForAudioClassification
+from transformers.modeling_outputs import SequenceClassifierOutput
+
+from ....configuration_utils import RBLNCompileConfig
+from ....modeling import RBLNModel
+from .configuration_audio_spectrogram_transformer import RBLNASTForAudioClassificationConfig
+
+
+if TYPE_CHECKING:
+    from transformers import AutoFeatureExtractor, PretrainedConfig, PreTrainedModel
+
+
+class RBLNASTForAudioClassification(RBLNModel):
+    """
+    Audio Spectrogram Transformer model with an audio classification head on top (a linear layer on top of the pooled output) e.g. for datasets like AudioSet, Speech Commands v2.
+    This model inherits from [RBLNModelForAudioClassification]. Check the superclass documentation for the generic methods the library implements for all its models.
+
+    A class to convert and run pre-trained transformer-based ASTForAudioClassification models on RBLN devices.
+    It implements the methods to convert a pre-trained transformers ASTForAudioClassification model into a RBLN transformer model by:
+
+    - transferring the checkpoint weights of the original into an optimized RBLN graph,
+    - compiling the resulting graph using the RBLN Compiler.
+    """
+
+    auto_model_class = AutoModelForAudioClassification
+
+    @classmethod
+    def _update_rbln_config(
+        cls,
+        preprocessors: "AutoFeatureExtractor" = None,
+        model: Optional["PreTrainedModel"] = None,
+        model_config: "PretrainedConfig" = None,
+        rbln_config: Optional[RBLNASTForAudioClassificationConfig] = None,
+    ) -> RBLNASTForAudioClassificationConfig:
+        num_mel_bins = getattr(model_config, "num_mel_bins", None)
+
+        if rbln_config.max_length is None:
+            rbln_config.max_length = getattr(model_config, "max_length", None)
+            for feature_extractor in preprocessors:
+                if hasattr(feature_extractor, "max_length"):
+                    rbln_config.max_length = feature_extractor.max_length
+                    break
+
+        if rbln_config.max_length is None:
+            raise ValueError("max_length should be specified!")
+
+        input_info = [
+            (
+                "input_values",
+                [rbln_config.batch_size, rbln_config.max_length, num_mel_bins],
+                "float32",
+            ),
+        ]
+
+        rbln_config.set_compile_cfgs([RBLNCompileConfig(input_info=input_info)])
+        return rbln_config
+
+    def forward(self, input_values: torch.Tensor, **kwargs) -> SequenceClassifierOutput:
+        """
+        Forward pass for the RBLN-optimized Audio Spectrogram Transformer model for audio classification.
+
+        Args:
+            input_values (torch.FloatTensor of shape (batch_size, max_length, num_mel_bins)):
+                Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by
+                loading a .flac or .wav audio file into an array of type list[float], a numpy.ndarray or a torch.Tensor, *e.g.* via
+                the torchcodec library (pip install torchcodec) or the soundfile library (pip install soundfile).
+                To prepare the array into input_features, the [AutoFeatureExtractor] should be used for extracting the
+                mel features, padding and conversion into a tensor of type torch.FloatTensor.
+
+        Returns:
+            Returns a SequenceClassifierOutput object.
+        """
+
+        return super().forward(input_values, **kwargs)
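Annotation (not part of the diff): an end-to-end sketch of compiling and running the class above. It assumes the optimum-style `from_pretrained(..., export=True)` entry point that RBLNModel subclasses expose; the checkpoint id is illustrative, and the exact compile-time keywords should be checked against the package README.

import torch
from transformers import AutoFeatureExtractor

from optimum.rbln.transformers.models import RBLNASTForAudioClassification

model_id = "MIT/ast-finetuned-audioset-10-10-0.4593"  # example AST checkpoint
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)

# Compilation bakes in the static input shape
# [batch_size, max_length, num_mel_bins] derived in _update_rbln_config.
model = RBLNASTForAudioClassification.from_pretrained(model_id, export=True)

waveform = torch.randn(16000)  # one second of placeholder 16 kHz audio
inputs = feature_extractor(waveform.numpy(), sampling_rate=16000, return_tensors="pt")
logits = model(inputs.input_values).logits
print(logits.shape)  # (batch_size, num_labels)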
optimum/rbln/transformers/models/auto/__init__.py
@@ -0,0 +1,31 @@
+# Copyright 2025 Rebellions Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .modeling_auto import (
+    RBLNAutoModel,
+    RBLNAutoModelForAudioClassification,
+    RBLNAutoModelForCausalLM,
+    RBLNAutoModelForCTC,
+    RBLNAutoModelForDepthEstimation,
+    RBLNAutoModelForImageClassification,
+    RBLNAutoModelForImageTextToText,
+    RBLNAutoModelForMaskedLM,
+    RBLNAutoModelForQuestionAnswering,
+    RBLNAutoModelForSeq2SeqLM,
+    RBLNAutoModelForSequenceClassification,
+    RBLNAutoModelForSpeechSeq2Seq,
+    RBLNAutoModelForTextEncoding,
+    RBLNAutoModelForVision2Seq,
+    RBLNAutoModelForZeroShotObjectDetection,
+)
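Annotation (not part of the diff): the RBLNAuto* classes re-exported above appear to mirror the transformers Auto* factories, dispatching to the matching RBLN model class based on the checkpoint config. A minimal sketch under that assumption, with an illustrative model id:

from optimum.rbln.transformers.models.auto import RBLNAutoModelForAudioClassification

# For an AST checkpoint this should resolve to RBLNASTForAudioClassification,
# since its auto_model_class is AutoModelForAudioClassification.
model = RBLNAutoModelForAudioClassification.from_pretrained(
    "MIT/ast-finetuned-audioset-10-10-0.4593",
    export=True,
)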