transformers-5.0.0rc1-py3-none-any.whl → transformers-5.0.0rc2-py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- transformers/__init__.py +20 -1
- transformers/activations.py +1 -1
- transformers/audio_utils.py +0 -1
- transformers/cache_utils.py +17 -15
- transformers/configuration_utils.py +114 -70
- transformers/conversion_mapping.py +68 -5
- transformers/core_model_loading.py +201 -35
- transformers/dependency_versions_table.py +1 -1
- transformers/feature_extraction_utils.py +54 -22
- transformers/generation/candidate_generator.py +79 -31
- transformers/generation/configuration_utils.py +162 -122
- transformers/generation/continuous_batching/cache.py +47 -18
- transformers/generation/continuous_batching/cache_manager.py +131 -34
- transformers/generation/continuous_batching/continuous_api.py +101 -64
- transformers/generation/continuous_batching/requests.py +28 -1
- transformers/generation/continuous_batching/scheduler.py +11 -4
- transformers/generation/stopping_criteria.py +1 -1
- transformers/generation/utils.py +108 -110
- transformers/generation/watermarking.py +8 -5
- transformers/image_processing_base.py +2 -12
- transformers/image_processing_utils_fast.py +15 -4
- transformers/initialization.py +37 -0
- transformers/integrations/__init__.py +12 -0
- transformers/integrations/accelerate.py +44 -111
- transformers/integrations/aqlm.py +3 -5
- transformers/integrations/awq.py +2 -5
- transformers/integrations/bitnet.py +5 -8
- transformers/integrations/bitsandbytes.py +16 -15
- transformers/integrations/deepspeed.py +18 -3
- transformers/integrations/eetq.py +3 -5
- transformers/integrations/fbgemm_fp8.py +1 -1
- transformers/integrations/finegrained_fp8.py +6 -16
- transformers/integrations/flash_attention.py +2 -2
- transformers/integrations/higgs.py +2 -5
- transformers/integrations/hub_kernels.py +23 -5
- transformers/integrations/integration_utils.py +35 -0
- transformers/integrations/mistral.py +12 -0
- transformers/integrations/moe.py +240 -0
- transformers/integrations/mxfp4.py +4 -10
- transformers/integrations/peft.py +5 -0
- transformers/integrations/quanto.py +5 -2
- transformers/integrations/spqr.py +3 -5
- transformers/integrations/tensor_parallel.py +167 -221
- transformers/integrations/vptq.py +3 -5
- transformers/modeling_gguf_pytorch_utils.py +66 -19
- transformers/modeling_rope_utils.py +78 -81
- transformers/modeling_utils.py +583 -503
- transformers/models/__init__.py +19 -0
- transformers/models/afmoe/modeling_afmoe.py +7 -16
- transformers/models/afmoe/modular_afmoe.py +5 -13
- transformers/models/aimv2/modeling_aimv2.py +4 -0
- transformers/models/aimv2/modular_aimv2.py +4 -0
- transformers/models/albert/modeling_albert.py +3 -0
- transformers/models/align/modeling_align.py +12 -6
- transformers/models/altclip/modeling_altclip.py +7 -3
- transformers/models/apertus/modeling_apertus.py +4 -2
- transformers/models/apertus/modular_apertus.py +4 -1
- transformers/models/arcee/modeling_arcee.py +1 -1
- transformers/models/aria/modeling_aria.py +8 -4
- transformers/models/aria/modular_aria.py +7 -3
- transformers/models/audioflamingo3/processing_audioflamingo3.py +27 -22
- transformers/models/auto/auto_factory.py +1 -1
- transformers/models/auto/configuration_auto.py +27 -0
- transformers/models/auto/feature_extraction_auto.py +7 -3
- transformers/models/auto/image_processing_auto.py +4 -2
- transformers/models/auto/modeling_auto.py +31 -0
- transformers/models/auto/processing_auto.py +4 -0
- transformers/models/auto/tokenization_auto.py +132 -153
- transformers/models/auto/video_processing_auto.py +5 -2
- transformers/models/aya_vision/modeling_aya_vision.py +7 -3
- transformers/models/bamba/modeling_bamba.py +18 -19
- transformers/models/bamba/modular_bamba.py +17 -16
- transformers/models/bark/modeling_bark.py +9 -0
- transformers/models/bart/configuration_bart.py +0 -1
- transformers/models/bart/modeling_bart.py +7 -0
- transformers/models/beit/image_processing_beit_fast.py +0 -1
- transformers/models/bert/modeling_bert.py +3 -0
- transformers/models/bert_generation/modeling_bert_generation.py +2 -0
- transformers/models/big_bird/modeling_big_bird.py +3 -0
- transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +7 -0
- transformers/models/bit/modeling_bit.py +5 -1
- transformers/models/bitnet/modeling_bitnet.py +1 -1
- transformers/models/blenderbot/modeling_blenderbot.py +7 -0
- transformers/models/blenderbot/tokenization_blenderbot.py +6 -7
- transformers/models/blenderbot_small/modeling_blenderbot_small.py +7 -0
- transformers/models/blip/modeling_blip.py +2 -0
- transformers/models/blip/modeling_blip_text.py +8 -0
- transformers/models/blip_2/modeling_blip_2.py +2 -0
- transformers/models/bloom/modeling_bloom.py +13 -44
- transformers/models/blt/modeling_blt.py +162 -2
- transformers/models/blt/modular_blt.py +168 -3
- transformers/models/bridgetower/image_processing_bridgetower_fast.py +0 -2
- transformers/models/bridgetower/modeling_bridgetower.py +6 -0
- transformers/models/bros/modeling_bros.py +8 -0
- transformers/models/camembert/modeling_camembert.py +109 -106
- transformers/models/canine/modeling_canine.py +6 -0
- transformers/models/canine/tokenization_canine.py +2 -0
- transformers/models/chameleon/modeling_chameleon.py +9 -4
- transformers/models/chinese_clip/modeling_chinese_clip.py +6 -3
- transformers/models/clap/feature_extraction_clap.py +2 -2
- transformers/models/clap/modeling_clap.py +25 -15
- transformers/models/clip/modeling_clip.py +2 -0
- transformers/models/clipseg/modeling_clipseg.py +4 -0
- transformers/models/clvp/modeling_clvp.py +14 -3
- transformers/models/code_llama/tokenization_code_llama.py +1 -1
- transformers/models/codegen/modeling_codegen.py +13 -4
- transformers/models/cohere/modeling_cohere.py +1 -1
- transformers/models/cohere2/modeling_cohere2.py +1 -1
- transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +0 -1
- transformers/models/cohere2_vision/modeling_cohere2_vision.py +7 -3
- transformers/models/conditional_detr/configuration_conditional_detr.py +1 -1
- transformers/models/conditional_detr/modeling_conditional_detr.py +4 -1
- transformers/models/convbert/modeling_convbert.py +3 -0
- transformers/models/convnext/image_processing_convnext.py +2 -2
- transformers/models/convnext/image_processing_convnext_fast.py +9 -13
- transformers/models/csm/generation_csm.py +19 -22
- transformers/models/csm/modeling_csm.py +3 -1
- transformers/models/csm/modular_csm.py +2 -0
- transformers/models/ctrl/modeling_ctrl.py +14 -2
- transformers/models/cvt/modeling_cvt.py +5 -1
- transformers/models/cwm/modeling_cwm.py +1 -1
- transformers/models/d_fine/configuration_d_fine.py +3 -4
- transformers/models/d_fine/modeling_d_fine.py +46 -39
- transformers/models/d_fine/modular_d_fine.py +15 -4
- transformers/models/dab_detr/configuration_dab_detr.py +2 -2
- transformers/models/dab_detr/modeling_dab_detr.py +1 -1
- transformers/models/dac/modeling_dac.py +4 -4
- transformers/models/data2vec/modeling_data2vec_text.py +7 -0
- transformers/models/data2vec/modular_data2vec_text.py +7 -0
- transformers/models/dbrx/configuration_dbrx.py +9 -1
- transformers/models/dbrx/modeling_dbrx.py +1 -1
- transformers/models/deberta/modeling_deberta.py +2 -0
- transformers/models/deberta_v2/modeling_deberta_v2.py +2 -0
- transformers/models/decision_transformer/modeling_decision_transformer.py +8 -5
- transformers/models/deepseek_v2/modeling_deepseek_v2.py +7 -4
- transformers/models/deepseek_v2/modular_deepseek_v2.py +4 -2
- transformers/models/deepseek_v3/modeling_deepseek_v3.py +9 -5
- transformers/models/deepseek_v3/modular_deepseek_v3.py +6 -2
- transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py +0 -1
- transformers/models/deepseek_vl/modeling_deepseek_vl.py +9 -5
- transformers/models/deepseek_vl/modular_deepseek_vl.py +3 -0
- transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py +0 -4
- transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py +9 -5
- transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py +9 -9
- transformers/models/deformable_detr/configuration_deformable_detr.py +2 -2
- transformers/models/deformable_detr/modeling_deformable_detr.py +1 -1
- transformers/models/depth_anything/configuration_depth_anything.py +2 -3
- transformers/models/depth_pro/image_processing_depth_pro_fast.py +0 -1
- transformers/models/detr/configuration_detr.py +1 -1
- transformers/models/detr/modeling_detr.py +8 -1
- transformers/models/dia/generation_dia.py +3 -10
- transformers/models/dia/modeling_dia.py +12 -1
- transformers/models/dia/modular_dia.py +11 -0
- transformers/models/dia/processing_dia.py +1 -1
- transformers/models/diffllama/modeling_diffllama.py +3 -3
- transformers/models/diffllama/modular_diffllama.py +2 -2
- transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py +0 -1
- transformers/models/dinov3_vit/modeling_dinov3_vit.py +3 -0
- transformers/models/dinov3_vit/modular_dinov3_vit.py +3 -0
- transformers/models/distilbert/modeling_distilbert.py +11 -9
- transformers/models/doge/modeling_doge.py +1 -1
- transformers/models/donut/image_processing_donut_fast.py +0 -1
- transformers/models/donut/modeling_donut_swin.py +16 -12
- transformers/models/dots1/modeling_dots1.py +14 -5
- transformers/models/dpt/configuration_dpt.py +1 -1
- transformers/models/dpt/image_processing_dpt_fast.py +1 -2
- transformers/models/dpt/modular_dpt.py +1 -2
- transformers/models/edgetam/configuration_edgetam.py +1 -1
- transformers/models/edgetam/modeling_edgetam.py +5 -2
- transformers/models/edgetam/modular_edgetam.py +15 -14
- transformers/models/edgetam_video/modeling_edgetam_video.py +55 -43
- transformers/models/edgetam_video/modular_edgetam_video.py +13 -19
- transformers/models/efficientloftr/image_processing_efficientloftr_fast.py +1 -2
- transformers/models/efficientloftr/modeling_efficientloftr.py +14 -1
- transformers/models/efficientnet/image_processing_efficientnet.py +5 -6
- transformers/models/efficientnet/image_processing_efficientnet_fast.py +1 -2
- transformers/models/efficientnet/modeling_efficientnet.py +5 -1
- transformers/models/electra/modeling_electra.py +7 -0
- transformers/models/emu3/modeling_emu3.py +8 -2
- transformers/models/emu3/modular_emu3.py +7 -1
- transformers/models/encodec/modeling_encodec.py +14 -0
- transformers/models/eomt/image_processing_eomt_fast.py +46 -14
- transformers/models/eomt/modeling_eomt.py +7 -0
- transformers/models/eomt/modular_eomt.py +7 -0
- transformers/models/ernie/modeling_ernie.py +6 -0
- transformers/models/ernie/modular_ernie.py +6 -0
- transformers/models/ernie4_5/modeling_ernie4_5.py +1 -1
- transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +16 -13
- transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py +9 -35
- transformers/models/ernie4_5_vl_moe/__init__.py +31 -0
- transformers/models/ernie4_5_vl_moe/configuration_ernie4_5_vl_moe.py +330 -0
- transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe.py +456 -0
- transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe_fast.py +232 -0
- transformers/models/ernie4_5_vl_moe/modeling_ernie4_5_vl_moe.py +1898 -0
- transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py +1904 -0
- transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py +251 -0
- transformers/models/ernie4_5_vl_moe/video_processing_ernie4_5_vl_moe.py +594 -0
- transformers/models/esm/modeling_esm.py +6 -0
- transformers/models/esm/modeling_esmfold.py +6 -1
- transformers/models/evolla/modeling_evolla.py +9 -1
- transformers/models/evolla/modular_evolla.py +8 -0
- transformers/models/exaone4/modeling_exaone4.py +1 -1
- transformers/models/falcon/modeling_falcon.py +3 -3
- transformers/models/falcon_h1/modeling_falcon_h1.py +28 -23
- transformers/models/falcon_h1/modular_falcon_h1.py +7 -2
- transformers/models/falcon_mamba/modeling_falcon_mamba.py +6 -2
- transformers/models/falcon_mamba/modular_falcon_mamba.py +7 -2
- transformers/models/fast_vlm/modeling_fast_vlm.py +7 -3
- transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +23 -10
- transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +1 -0
- transformers/models/flaubert/modeling_flaubert.py +14 -15
- transformers/models/flava/image_processing_flava_fast.py +0 -2
- transformers/models/flava/modeling_flava.py +4 -1
- transformers/models/flex_olmo/modeling_flex_olmo.py +7 -4
- transformers/models/florence2/modeling_florence2.py +20 -3
- transformers/models/florence2/modular_florence2.py +13 -0
- transformers/models/fnet/modeling_fnet.py +7 -0
- transformers/models/fuyu/image_processing_fuyu.py +1 -1
- transformers/models/fuyu/modeling_fuyu.py +3 -1
- transformers/models/fuyu/processing_fuyu.py +16 -0
- transformers/models/gemma/modeling_gemma.py +10 -12
- transformers/models/gemma/modular_gemma.py +9 -11
- transformers/models/gemma2/modeling_gemma2.py +1 -1
- transformers/models/gemma2/modular_gemma2.py +1 -1
- transformers/models/gemma3/image_processing_gemma3_fast.py +0 -1
- transformers/models/gemma3/modeling_gemma3.py +28 -7
- transformers/models/gemma3/modular_gemma3.py +26 -6
- transformers/models/gemma3n/configuration_gemma3n.py +3 -0
- transformers/models/gemma3n/modeling_gemma3n.py +47 -9
- transformers/models/gemma3n/modular_gemma3n.py +51 -9
- transformers/models/git/modeling_git.py +181 -126
- transformers/models/glm/modeling_glm.py +1 -1
- transformers/models/glm4/modeling_glm4.py +1 -1
- transformers/models/glm46v/image_processing_glm46v.py +0 -4
- transformers/models/glm46v/modeling_glm46v.py +3 -1
- transformers/models/glm46v/modular_glm46v.py +3 -0
- transformers/models/glm4_moe/modeling_glm4_moe.py +9 -5
- transformers/models/glm4_moe/modular_glm4_moe.py +1 -1
- transformers/models/glm4v/image_processing_glm4v.py +0 -4
- transformers/models/glm4v/modeling_glm4v.py +15 -5
- transformers/models/glm4v/modular_glm4v.py +11 -3
- transformers/models/glm4v_moe/modeling_glm4v_moe.py +39 -23
- transformers/models/glm4v_moe/modular_glm4v_moe.py +12 -0
- transformers/models/glmasr/__init__.py +30 -0
- transformers/models/glmasr/configuration_glmasr.py +197 -0
- transformers/models/glmasr/modeling_glmasr.py +512 -0
- transformers/models/glmasr/modular_glmasr.py +433 -0
- transformers/models/glmasr/processing_glmasr.py +332 -0
- transformers/models/glpn/image_processing_glpn_fast.py +0 -1
- transformers/models/got_ocr2/image_processing_got_ocr2_fast.py +0 -1
- transformers/models/got_ocr2/modeling_got_ocr2.py +8 -3
- transformers/models/gpt2/modeling_gpt2.py +8 -5
- transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +3 -8
- transformers/models/gpt_neo/modeling_gpt_neo.py +15 -3
- transformers/models/gpt_neox/modeling_gpt_neox.py +1 -1
- transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +1 -1
- transformers/models/gpt_oss/configuration_gpt_oss.py +17 -0
- transformers/models/gpt_oss/modeling_gpt_oss.py +6 -9
- transformers/models/gpt_oss/modular_gpt_oss.py +5 -7
- transformers/models/gptj/modeling_gptj.py +15 -6
- transformers/models/granite/modeling_granite.py +1 -1
- transformers/models/granite_speech/modeling_granite_speech.py +15 -1
- transformers/models/granitemoe/modeling_granitemoe.py +2 -3
- transformers/models/granitemoe/modular_granitemoe.py +1 -2
- transformers/models/granitemoehybrid/configuration_granitemoehybrid.py +4 -0
- transformers/models/granitemoehybrid/modeling_granitemoehybrid.py +33 -23
- transformers/models/granitemoehybrid/modular_granitemoehybrid.py +12 -2
- transformers/models/granitemoeshared/modeling_granitemoeshared.py +2 -3
- transformers/models/grounding_dino/configuration_grounding_dino.py +2 -3
- transformers/models/grounding_dino/modeling_grounding_dino.py +4 -4
- transformers/models/groupvit/modeling_groupvit.py +6 -1
- transformers/models/helium/modeling_helium.py +1 -1
- transformers/models/hgnet_v2/modeling_hgnet_v2.py +10 -0
- transformers/models/hgnet_v2/modular_hgnet_v2.py +10 -0
- transformers/models/hubert/modeling_hubert.py +4 -0
- transformers/models/hubert/modular_hubert.py +4 -0
- transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py +1 -1
- transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py +1 -1
- transformers/models/hunyuan_v1_moe/__init__.py +1 -1
- transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py +12 -4
- transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py +4 -2
- transformers/models/ibert/modeling_ibert.py +16 -0
- transformers/models/idefics/modeling_idefics.py +10 -0
- transformers/models/idefics2/modeling_idefics2.py +7 -1
- transformers/models/idefics3/modeling_idefics3.py +5 -1
- transformers/models/imagegpt/image_processing_imagegpt_fast.py +1 -5
- transformers/models/imagegpt/modeling_imagegpt.py +9 -2
- transformers/models/instructblip/modeling_instructblip.py +2 -0
- transformers/models/instructblipvideo/modeling_instructblipvideo.py +52 -50
- transformers/models/instructblipvideo/video_processing_instructblipvideo.py +0 -1
- transformers/models/internvl/modeling_internvl.py +11 -8
- transformers/models/internvl/modular_internvl.py +5 -9
- transformers/models/internvl/video_processing_internvl.py +0 -1
- transformers/models/jais2/__init__.py +27 -0
- transformers/models/jais2/configuration_jais2.py +152 -0
- transformers/models/jais2/modeling_jais2.py +486 -0
- transformers/models/jais2/modular_jais2.py +196 -0
- transformers/models/jamba/modeling_jamba.py +24 -19
- transformers/models/jamba/modular_jamba.py +17 -17
- transformers/models/janus/image_processing_janus_fast.py +0 -1
- transformers/models/janus/modeling_janus.py +15 -7
- transformers/models/janus/modular_janus.py +16 -7
- transformers/models/jetmoe/modeling_jetmoe.py +2 -2
- transformers/models/jetmoe/modular_jetmoe.py +1 -0
- transformers/models/kosmos2/modeling_kosmos2.py +14 -2
- transformers/models/kosmos2_5/image_processing_kosmos2_5_fast.py +2 -2
- transformers/models/kosmos2_5/modeling_kosmos2_5.py +10 -1
- transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +9 -3
- transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py +9 -1
- transformers/models/lasr/configuration_lasr.py +4 -0
- transformers/models/lasr/modeling_lasr.py +3 -2
- transformers/models/lasr/modular_lasr.py +8 -1
- transformers/models/lasr/processing_lasr.py +0 -2
- transformers/models/layoutlm/modeling_layoutlm.py +5 -3
- transformers/models/layoutlmv2/image_processing_layoutlmv2_fast.py +0 -1
- transformers/models/layoutlmv2/modeling_layoutlmv2.py +12 -0
- transformers/models/layoutlmv2/tokenization_layoutlmv2.py +1 -0
- transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py +0 -1
- transformers/models/layoutlmv3/modeling_layoutlmv3.py +29 -5
- transformers/models/led/modeling_led.py +6 -0
- transformers/models/levit/modeling_levit.py +18 -0
- transformers/models/lfm2/modeling_lfm2.py +1 -1
- transformers/models/lfm2_moe/modeling_lfm2_moe.py +14 -4
- transformers/models/lfm2_moe/modular_lfm2_moe.py +5 -28
- transformers/models/lfm2_vl/configuration_lfm2_vl.py +4 -0
- transformers/models/lfm2_vl/modeling_lfm2_vl.py +11 -5
- transformers/models/lfm2_vl/modular_lfm2_vl.py +4 -2
- transformers/models/lfm2_vl/processing_lfm2_vl.py +82 -42
- transformers/models/lightglue/image_processing_lightglue_fast.py +1 -2
- transformers/models/lilt/modeling_lilt.py +19 -15
- transformers/models/llama/modeling_llama.py +1 -1
- transformers/models/llama4/image_processing_llama4_fast.py +1 -2
- transformers/models/llama4/modeling_llama4.py +8 -4
- transformers/models/llava/image_processing_llava_fast.py +0 -1
- transformers/models/llava/modeling_llava.py +12 -7
- transformers/models/llava_next/image_processing_llava_next_fast.py +0 -1
- transformers/models/llava_next/modeling_llava_next.py +7 -3
- transformers/models/llava_next_video/modeling_llava_next_video.py +7 -3
- transformers/models/llava_next_video/modular_llava_next_video.py +7 -3
- transformers/models/llava_onevision/image_processing_llava_onevision_fast.py +0 -1
- transformers/models/llava_onevision/modeling_llava_onevision.py +7 -3
- transformers/models/llava_onevision/modular_llava_onevision.py +7 -4
- transformers/models/longcat_flash/modeling_longcat_flash.py +2 -1
- transformers/models/longcat_flash/modular_longcat_flash.py +1 -0
- transformers/models/longt5/modeling_longt5.py +0 -4
- transformers/models/m2m_100/modeling_m2m_100.py +10 -0
- transformers/models/mamba/modeling_mamba.py +2 -1
- transformers/models/mamba2/modeling_mamba2.py +24 -23
- transformers/models/marian/configuration_marian.py +1 -1
- transformers/models/marian/modeling_marian.py +3 -0
- transformers/models/markuplm/modeling_markuplm.py +5 -8
- transformers/models/mask2former/configuration_mask2former.py +3 -3
- transformers/models/mask2former/image_processing_mask2former_fast.py +1 -4
- transformers/models/mask2former/modeling_mask2former.py +9 -0
- transformers/models/maskformer/configuration_maskformer.py +3 -3
- transformers/models/maskformer/image_processing_maskformer_fast.py +1 -4
- transformers/models/maskformer/modeling_maskformer.py +9 -1
- transformers/models/maskformer/modeling_maskformer_swin.py +19 -15
- transformers/models/mbart/configuration_mbart.py +1 -0
- transformers/models/mbart/modeling_mbart.py +7 -0
- transformers/models/megatron_bert/modeling_megatron_bert.py +2 -0
- transformers/models/metaclip_2/modeling_metaclip_2.py +2 -0
- transformers/models/metaclip_2/modular_metaclip_2.py +2 -0
- transformers/models/mimi/modeling_mimi.py +25 -4
- transformers/models/minimax/modeling_minimax.py +16 -3
- transformers/models/minimax/modular_minimax.py +12 -1
- transformers/models/ministral/modeling_ministral.py +1 -1
- transformers/models/ministral3/modeling_ministral3.py +1 -1
- transformers/models/mistral/modeling_mistral.py +1 -1
- transformers/models/mistral3/modeling_mistral3.py +10 -4
- transformers/models/mistral3/modular_mistral3.py +3 -1
- transformers/models/mixtral/modeling_mixtral.py +12 -4
- transformers/models/mixtral/modular_mixtral.py +6 -2
- transformers/models/mlcd/modeling_mlcd.py +6 -0
- transformers/models/mlcd/modular_mlcd.py +4 -0
- transformers/models/mllama/modeling_mllama.py +13 -2
- transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py +1 -2
- transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +4 -4
- transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py +1 -2
- transformers/models/mobilebert/modeling_mobilebert.py +2 -0
- transformers/models/mobilenet_v2/image_processing_mobilenet_v2_fast.py +0 -1
- transformers/models/mobilevit/image_processing_mobilevit.py +5 -5
- transformers/models/mobilevit/image_processing_mobilevit_fast.py +1 -2
- transformers/models/mobilevit/modeling_mobilevit.py +4 -0
- transformers/models/mobilevitv2/modeling_mobilevitv2.py +4 -0
- transformers/models/modernbert/modeling_modernbert.py +12 -1
- transformers/models/modernbert/modular_modernbert.py +12 -1
- transformers/models/modernbert_decoder/modeling_modernbert_decoder.py +9 -1
- transformers/models/modernbert_decoder/modular_modernbert_decoder.py +9 -1
- transformers/models/moonshine/modeling_moonshine.py +1 -1
- transformers/models/moshi/modeling_moshi.py +21 -51
- transformers/models/mpnet/modeling_mpnet.py +2 -0
- transformers/models/mra/modeling_mra.py +4 -1
- transformers/models/mt5/configuration_mt5.py +2 -3
- transformers/models/mt5/modeling_mt5.py +0 -10
- transformers/models/musicgen/modeling_musicgen.py +5 -9
- transformers/models/musicgen_melody/modeling_musicgen_melody.py +4 -0
- transformers/models/mvp/modeling_mvp.py +7 -0
- transformers/models/nanochat/modeling_nanochat.py +1 -1
- transformers/models/nemotron/modeling_nemotron.py +3 -3
- transformers/models/nllb_moe/configuration_nllb_moe.py +1 -0
- transformers/models/nllb_moe/modeling_nllb_moe.py +10 -0
- transformers/models/nougat/image_processing_nougat_fast.py +0 -1
- transformers/models/nougat/tokenization_nougat.py +11 -16
- transformers/models/nystromformer/modeling_nystromformer.py +7 -0
- transformers/models/olmo/modeling_olmo.py +1 -1
- transformers/models/olmo2/modeling_olmo2.py +1 -1
- transformers/models/olmo3/modeling_olmo3.py +1 -1
- transformers/models/olmoe/modeling_olmoe.py +12 -4
- transformers/models/olmoe/modular_olmoe.py +4 -2
- transformers/models/omdet_turbo/configuration_omdet_turbo.py +2 -2
- transformers/models/omdet_turbo/modeling_omdet_turbo.py +4 -0
- transformers/models/oneformer/configuration_oneformer.py +3 -3
- transformers/models/oneformer/modeling_oneformer.py +7 -38
- transformers/models/openai/modeling_openai.py +12 -0
- transformers/models/ovis2/image_processing_ovis2_fast.py +0 -1
- transformers/models/ovis2/modeling_ovis2.py +15 -3
- transformers/models/ovis2/modular_ovis2.py +8 -0
- transformers/models/owlv2/image_processing_owlv2_fast.py +0 -2
- transformers/models/owlv2/modeling_owlv2.py +7 -3
- transformers/models/owlv2/modular_owlv2.py +0 -2
- transformers/models/owlvit/modeling_owlvit.py +7 -3
- transformers/models/paddleocr_vl/image_processing_paddleocr_vl.py +3 -2
- transformers/models/paddleocr_vl/modeling_paddleocr_vl.py +28 -14
- transformers/models/paddleocr_vl/modular_paddleocr_vl.py +22 -12
- transformers/models/paligemma/modeling_paligemma.py +25 -17
- transformers/models/parakeet/modeling_parakeet.py +5 -0
- transformers/models/parakeet/modular_parakeet.py +5 -0
- transformers/models/parakeet/{tokenization_parakeet_fast.py → tokenization_parakeet.py} +3 -3
- transformers/models/patchtsmixer/modeling_patchtsmixer.py +4 -0
- transformers/models/patchtst/modeling_patchtst.py +5 -4
- transformers/models/pe_audio/__init__.py +30 -0
- transformers/models/pe_audio/configuration_pe_audio.py +206 -0
- transformers/models/pe_audio/feature_extraction_pe_audio.py +162 -0
- transformers/models/pe_audio/modeling_pe_audio.py +820 -0
- transformers/models/pe_audio/modular_pe_audio.py +299 -0
- transformers/models/pe_audio/processing_pe_audio.py +24 -0
- transformers/models/pe_audio_video/__init__.py +29 -0
- transformers/models/pe_audio_video/configuration_pe_audio_video.py +225 -0
- transformers/models/pe_audio_video/modeling_pe_audio_video.py +972 -0
- transformers/models/pe_audio_video/modular_pe_audio_video.py +764 -0
- transformers/models/pe_audio_video/processing_pe_audio_video.py +25 -0
- transformers/models/pe_video/__init__.py +30 -0
- transformers/models/pe_video/configuration_pe_video.py +211 -0
- transformers/models/pe_video/modeling_pe_video.py +636 -0
- transformers/models/pe_video/modular_pe_video.py +219 -0
- transformers/models/pe_video/processing_pe_video.py +10 -0
- transformers/models/pe_video/video_processing_pe_video.py +66 -0
- transformers/models/pegasus/configuration_pegasus.py +1 -0
- transformers/models/pegasus/modeling_pegasus.py +3 -0
- transformers/models/pegasus_x/modeling_pegasus_x.py +1 -0
- transformers/models/perceiver/image_processing_perceiver_fast.py +0 -1
- transformers/models/perceiver/modeling_perceiver.py +5 -1
- transformers/models/perception_lm/image_processing_perception_lm_fast.py +0 -1
- transformers/models/perception_lm/modeling_perception_lm.py +7 -3
- transformers/models/perception_lm/modular_perception_lm.py +7 -3
- transformers/models/persimmon/modeling_persimmon.py +1 -1
- transformers/models/phi/modeling_phi.py +1 -1
- transformers/models/phi3/modeling_phi3.py +1 -1
- transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +4 -1
- transformers/models/phi4_multimodal/modular_phi4_multimodal.py +3 -0
- transformers/models/phi4_multimodal/processing_phi4_multimodal.py +0 -2
- transformers/models/phimoe/modeling_phimoe.py +12 -4
- transformers/models/phimoe/modular_phimoe.py +1 -1
- transformers/models/pix2struct/processing_pix2struct.py +0 -4
- transformers/models/pixio/__init__.py +30 -0
- transformers/models/pixio/configuration_pixio.py +151 -0
- transformers/models/pixio/modeling_pixio.py +507 -0
- transformers/models/pixio/modular_pixio.py +404 -0
- transformers/models/pixtral/modeling_pixtral.py +1 -1
- transformers/models/pixtral/processing_pixtral.py +3 -1
- transformers/models/plbart/configuration_plbart.py +1 -0
- transformers/models/plbart/modeling_plbart.py +7 -0
- transformers/models/plbart/modular_plbart.py +6 -0
- transformers/models/poolformer/image_processing_poolformer_fast.py +0 -1
- transformers/models/poolformer/modeling_poolformer.py +11 -1
- transformers/models/pop2piano/configuration_pop2piano.py +0 -1
- transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py +2 -3
- transformers/models/prophetnet/modeling_prophetnet.py +2 -1
- transformers/models/qwen2/modeling_qwen2.py +1 -1
- transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +104 -64
- transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +58 -18
- transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +18 -5
- transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py +26 -22
- transformers/models/qwen2_audio/modeling_qwen2_audio.py +2 -2
- transformers/models/qwen2_moe/modeling_qwen2_moe.py +12 -4
- transformers/models/qwen2_vl/image_processing_qwen2_vl.py +3 -2
- transformers/models/qwen2_vl/modeling_qwen2_vl.py +17 -4
- transformers/models/qwen3/modeling_qwen3.py +1 -1
- transformers/models/qwen3_moe/modeling_qwen3_moe.py +12 -4
- transformers/models/qwen3_next/modeling_qwen3_next.py +4 -6
- transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py +4 -0
- transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +92 -46
- transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +48 -4
- transformers/models/qwen3_vl/configuration_qwen3_vl.py +5 -5
- transformers/models/qwen3_vl/modeling_qwen3_vl.py +17 -4
- transformers/models/qwen3_vl/modular_qwen3_vl.py +21 -10
- transformers/models/qwen3_vl/processing_qwen3_vl.py +3 -3
- transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py +94 -112
- transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py +32 -81
- transformers/models/rag/configuration_rag.py +0 -8
- transformers/models/rag/modeling_rag.py +7 -9
- transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +3 -2
- transformers/models/reformer/modeling_reformer.py +9 -1
- transformers/models/regnet/modeling_regnet.py +4 -0
- transformers/models/rembert/modeling_rembert.py +7 -1
- transformers/models/resnet/modeling_resnet.py +8 -3
- transformers/models/roberta/modeling_roberta.py +3 -0
- transformers/models/roberta/modular_roberta.py +3 -0
- transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +3 -0
- transformers/models/roc_bert/modeling_roc_bert.py +3 -0
- transformers/models/rt_detr/configuration_rt_detr.py +1 -1
- transformers/models/rt_detr/modeling_rt_detr.py +4 -0
- transformers/models/rt_detr/modeling_rt_detr_resnet.py +8 -3
- transformers/models/rt_detr_v2/configuration_rt_detr_v2.py +2 -3
- transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +7 -0
- transformers/models/rt_detr_v2/modular_rt_detr_v2.py +8 -3
- transformers/models/rwkv/modeling_rwkv.py +1 -1
- transformers/models/sam/configuration_sam.py +1 -0
- transformers/models/sam/image_processing_sam_fast.py +0 -1
- transformers/models/sam/modeling_sam.py +4 -1
- transformers/models/sam2/configuration_sam2.py +1 -1
- transformers/models/sam2/modeling_sam2.py +5 -1
- transformers/models/sam2/modular_sam2.py +5 -1
- transformers/models/sam2_video/modeling_sam2_video.py +51 -43
- transformers/models/sam2_video/modular_sam2_video.py +31 -18
- transformers/models/sam3/configuration_sam3.py +21 -1
- transformers/models/sam3/modeling_sam3.py +23 -0
- transformers/models/sam3_tracker/modeling_sam3_tracker.py +2 -0
- transformers/models/sam3_tracker/modular_sam3_tracker.py +2 -0
- transformers/models/sam3_tracker_video/configuration_sam3_tracker_video.py +25 -0
- transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +26 -15
- transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py +25 -2
- transformers/models/sam3_video/configuration_sam3_video.py +14 -0
- transformers/models/sam3_video/modeling_sam3_video.py +3 -3
- transformers/models/sam3_video/processing_sam3_video.py +1 -1
- transformers/models/sam_hq/configuration_sam_hq.py +1 -0
- transformers/models/sam_hq/modeling_sam_hq.py +26 -23
- transformers/models/seamless_m4t/modeling_seamless_m4t.py +27 -11
- transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +6 -0
- transformers/models/seed_oss/modeling_seed_oss.py +1 -1
- transformers/models/segformer/image_processing_segformer_fast.py +0 -1
- transformers/models/segformer/modeling_segformer.py +2 -2
- transformers/models/segformer/modular_segformer.py +0 -1
- transformers/models/shieldgemma2/modeling_shieldgemma2.py +1 -0
- transformers/models/siglip/modeling_siglip.py +24 -2
- transformers/models/siglip2/modeling_siglip2.py +63 -41
- transformers/models/smollm3/modeling_smollm3.py +1 -1
- transformers/models/smolvlm/modeling_smolvlm.py +5 -1
- transformers/models/smolvlm/video_processing_smolvlm.py +0 -1
- transformers/models/speech_to_text/modeling_speech_to_text.py +10 -0
- transformers/models/speecht5/modeling_speecht5.py +28 -0
- transformers/models/splinter/modeling_splinter.py +9 -3
- transformers/models/squeezebert/modeling_squeezebert.py +2 -0
- transformers/models/stablelm/modeling_stablelm.py +1 -1
- transformers/models/starcoder2/modeling_starcoder2.py +1 -1
- transformers/models/superglue/image_processing_superglue_fast.py +1 -2
- transformers/models/superpoint/image_processing_superpoint_fast.py +1 -2
- transformers/models/swiftformer/modeling_swiftformer.py +4 -0
- transformers/models/swin/modeling_swin.py +16 -12
- transformers/models/swin2sr/image_processing_swin2sr_fast.py +0 -1
- transformers/models/swin2sr/modeling_swin2sr.py +49 -33
- transformers/models/swinv2/modeling_swinv2.py +41 -33
- transformers/models/switch_transformers/modeling_switch_transformers.py +2 -8
- transformers/models/switch_transformers/modular_switch_transformers.py +2 -8
- transformers/models/t5/configuration_t5.py +7 -1
- transformers/models/t5/modeling_t5.py +1 -7
- transformers/models/t5gemma/modeling_t5gemma.py +1 -1
- transformers/models/t5gemma2/configuration_t5gemma2.py +6 -42
- transformers/models/t5gemma2/modeling_t5gemma2.py +13 -4
- transformers/models/t5gemma2/modular_t5gemma2.py +289 -4
- transformers/models/table_transformer/configuration_table_transformer.py +1 -1
- transformers/models/table_transformer/modeling_table_transformer.py +1 -1
- transformers/models/textnet/image_processing_textnet_fast.py +0 -1
- transformers/models/timesfm/modeling_timesfm.py +12 -0
- transformers/models/timesfm/modular_timesfm.py +12 -0
- transformers/models/timm_backbone/modeling_timm_backbone.py +13 -9
- transformers/models/timm_wrapper/configuration_timm_wrapper.py +3 -0
- transformers/models/timm_wrapper/modeling_timm_wrapper.py +19 -13
- transformers/models/trocr/modeling_trocr.py +1 -2
- transformers/models/tvp/configuration_tvp.py +5 -1
- transformers/models/tvp/modeling_tvp.py +4 -4
- transformers/models/udop/configuration_udop.py +1 -0
- transformers/models/udop/modeling_udop.py +3 -7
- transformers/models/umt5/configuration_umt5.py +2 -2
- transformers/models/umt5/modeling_umt5.py +0 -6
- transformers/models/vaultgemma/modeling_vaultgemma.py +1 -1
- transformers/models/video_llama_3/image_processing_video_llama_3.py +3 -2
- transformers/models/video_llama_3/modeling_video_llama_3.py +12 -1
- transformers/models/video_llama_3/modular_video_llama_3.py +10 -1
- transformers/models/video_llava/modeling_video_llava.py +7 -3
- transformers/models/vilt/configuration_vilt.py +2 -2
- transformers/models/vilt/modeling_vilt.py +7 -0
- transformers/models/vipllava/modeling_vipllava.py +7 -3
- transformers/models/visual_bert/modeling_visual_bert.py +2 -0
- transformers/models/vitmatte/configuration_vitmatte.py +1 -1
- transformers/models/vitmatte/image_processing_vitmatte_fast.py +0 -1
- transformers/models/vitmatte/modeling_vitmatte.py +4 -0
- transformers/models/vitpose/configuration_vitpose.py +1 -1
- transformers/models/vitpose/image_processing_vitpose_fast.py +0 -1
- transformers/models/voxtral/modeling_voxtral.py +2 -2
- transformers/models/voxtral/modular_voxtral.py +2 -2
- transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +16 -10
- transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py +7 -0
- transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +21 -11
- transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py +21 -11
- transformers/models/whisper/generation_whisper.py +1 -0
- transformers/models/whisper/modeling_whisper.py +5 -3
- transformers/models/x_clip/modeling_x_clip.py +2 -0
- transformers/models/xcodec/modeling_xcodec.py +5 -0
- transformers/models/xglm/modeling_xglm.py +10 -0
- transformers/models/xlm/modeling_xlm.py +13 -14
- transformers/models/xlm_roberta/modeling_xlm_roberta.py +109 -106
- transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +3 -0
- transformers/models/xlnet/modeling_xlnet.py +3 -1
- transformers/models/xmod/modeling_xmod.py +3 -0
- transformers/models/yoso/modeling_yoso.py +4 -1
- transformers/models/zamba/modeling_zamba.py +2 -1
- transformers/models/zamba2/modeling_zamba2.py +3 -2
- transformers/models/zoedepth/configuration_zoedepth.py +1 -1
- transformers/models/zoedepth/image_processing_zoedepth_fast.py +1 -3
- transformers/models/zoedepth/modeling_zoedepth.py +7 -0
- transformers/pipelines/__init__.py +9 -6
- transformers/pipelines/automatic_speech_recognition.py +20 -12
- transformers/pipelines/base.py +1 -1
- transformers/pipelines/document_question_answering.py +1 -1
- transformers/pipelines/question_answering.py +1 -1
- transformers/pipelines/text_to_audio.py +2 -2
- transformers/processing_utils.py +127 -56
- transformers/quantizers/auto.py +2 -4
- transformers/quantizers/base.py +9 -64
- transformers/quantizers/quantizer_aqlm.py +1 -18
- transformers/quantizers/quantizer_auto_round.py +1 -10
- transformers/quantizers/quantizer_awq.py +3 -8
- transformers/quantizers/quantizer_bitnet.py +1 -6
- transformers/quantizers/quantizer_bnb_4bit.py +9 -49
- transformers/quantizers/quantizer_bnb_8bit.py +9 -19
- transformers/quantizers/quantizer_compressed_tensors.py +1 -4
- transformers/quantizers/quantizer_eetq.py +2 -12
- transformers/quantizers/quantizer_fbgemm_fp8.py +5 -14
- transformers/quantizers/quantizer_finegrained_fp8.py +15 -10
- transformers/quantizers/quantizer_fp_quant.py +4 -4
- transformers/quantizers/quantizer_gptq.py +1 -4
- transformers/quantizers/quantizer_higgs.py +2 -6
- transformers/quantizers/quantizer_mxfp4.py +2 -28
- transformers/quantizers/quantizer_quanto.py +14 -14
- transformers/quantizers/quantizer_spqr.py +3 -8
- transformers/quantizers/quantizer_torchao.py +28 -124
- transformers/quantizers/quantizer_vptq.py +1 -10
- transformers/testing_utils.py +28 -12
- transformers/tokenization_mistral_common.py +3 -2
- transformers/tokenization_utils_base.py +3 -2
- transformers/tokenization_utils_tokenizers.py +25 -2
- transformers/trainer.py +24 -2
- transformers/trainer_callback.py +8 -0
- transformers/trainer_seq2seq.py +4 -0
- transformers/training_args.py +8 -10
- transformers/utils/__init__.py +4 -0
- transformers/utils/attention_visualizer.py +4 -4
- transformers/utils/auto_docstring.py +34 -25
- transformers/utils/generic.py +20 -0
- transformers/utils/import_utils.py +51 -9
- transformers/utils/kernel_config.py +71 -18
- transformers/utils/quantization_config.py +8 -8
- transformers/video_processing_utils.py +16 -12
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/METADATA +5 -6
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/RECORD +671 -632
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/WHEEL +0 -0
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/entry_points.txt +0 -0
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/licenses/LICENSE +0 -0
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/top_level.txt +0 -0
transformers/models/glmasr/modular_glmasr.py (new file)
@@ -0,0 +1,433 @@
# coding=utf-8
# Copyright 2025 the HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections.abc import Callable
from typing import Optional, Union

import numpy as np

from ...activations import ACT2FN
from ...audio_utils import AudioInput, make_list_of_audio
from ...cache_utils import Cache
from ...feature_extraction_utils import BatchFeature
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, CausalLMOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, is_torch_available, logging
from ...utils.generic import check_model_inputs
from ..audioflamingo3.modeling_audioflamingo3 import (
    AudioFlamingo3ForConditionalGeneration,
    AudioFlamingo3MultiModalProjector,
    AudioFlamingo3PreTrainedModel,
)
from ..audioflamingo3.processing_audioflamingo3 import AudioFlamingo3Processor, AudioFlamingo3ProcessorKwargs
from ..glm.modeling_glm import GlmRotaryEmbedding
from ..llama.modeling_llama import LlamaAttention, eager_attention_forward, rotate_half
from .configuration_glmasr import GlmAsrConfig, GlmAsrEncoderConfig


if is_torch_available():
    import torch
    from torch import nn


logger = logging.get_logger(__name__)


class GlmAsrProcessorKwargs(AudioFlamingo3ProcessorKwargs): ...


class GlmAsrProcessor(AudioFlamingo3Processor):
    r"""
    Constructs a GlmAsr processor which wraps a GlmAsr feature extractor and a GlmAsr
    tokenizer into a single processor.

    [`GlmAsrProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and
    [`Qwen2TokenizerFast`]. See [`~GlmAsrProcessor.__call__`] for more information.

    Args:
        feature_extractor ([`WhisperFeatureExtractor`]):
            The feature extractor is a required input.
        tokenizer ([`Qwen2TokenizerFast`]):
            The tokenizer is a required input.
        chat_template (`Optional[str]`, *optional*):
            The Jinja template to use for formatting the conversation. If not provided, the tokenizer's default chat
            template will be used.
        audio_token (`Optional[str]`, *optional*, defaults to `"<|pad|>"`):
            Special token used to represent audio inputs in the chat template.
        default_transcription_prompt (`str`, *optional*, defaults to `"Please transcribe this audio into text"`):
            Default prompt to use for transcription tasks when applying transcription requests.
        max_audio_len (`int`, *optional*, defaults to 655):
            Maximum length of audio sequences in seconds. Audio longer than this will be truncated.
            655 seconds gives approximately 8192 tokens, corresponding to the maximum sequence length of the text model.
    """

    def __init__(
        self,
        feature_extractor,
        tokenizer,
        chat_template=None,
        audio_token="<|pad|>",
        default_transcription_prompt="Please transcribe this audio into text",
        max_audio_len=655,
    ):
        super().__init__(
            feature_extractor,
            tokenizer,
            chat_template=chat_template,
            audio_token=audio_token,
            default_transcription_prompt=default_transcription_prompt,
            max_audio_len=max_audio_len,
        )

    def _get_audio_token_length(self, audio_lengths: "torch.Tensor") -> "torch.Tensor":
        merge_factor = 4
        for padding, kernel_size, stride in [(1, 3, 1), (1, 3, 2)]:
            audio_lengths = (audio_lengths + 2 * padding - (kernel_size - 1) - 1) // stride + 1

        num_tokens = (audio_lengths - merge_factor) // merge_factor + 1
        return num_tokens

    def apply_transcription_request(
        self,
        audio: Union[str, list[str], AudioInput],
        prompt: Optional[Union[str, list[str]]] = None,
        **kwargs: Unpack[GlmAsrProcessorKwargs],
    ) -> BatchFeature:
        """
        Prepare inputs for automatic speech recognition without manually writing the default transcription prompt.

        Args:
            audio (`str`, `list[str]`, `np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                Audio to transcribe. Strings are interpreted as local paths or URLs and will be loaded automatically by
                the chat template loader; NumPy arrays and PyTorch tensors are forwarded directly.
            prompt (`str` or `list[str]`, *optional*):
                Custom prompt(s) to include in the user turn. A list must be the same length as the batch. When `None`,
                each sample uses the processor's `default_transcription_prompt`.
            **kwargs:
                Additional keyword arguments forwarded to [`~AudioFlamingo3Processor.apply_chat_template`] (for example
                `text_kwargs`, `audio_kwargs`, ...).

        Returns:
            [`BatchFeature`]: Processor outputs ready to be passed to [`AudioFlamingo3ForConditionalGeneration.generate`].
        """
        if isinstance(audio, str):
            audio_items: list[Union[str, np.ndarray]] = [audio]
        elif isinstance(audio, (list, tuple)) and audio and all(isinstance(el, str) for el in audio):
            audio_items = list(audio)
        else:
            audio_items = list(make_list_of_audio(audio))
            if is_torch_available():
                audio_items = [el.detach().cpu().numpy() if isinstance(el, torch.Tensor) else el for el in audio_items]

        batch_size = len(audio_items)
        if batch_size == 0:
            raise ValueError("`audio` must contain at least one sample.")

        if prompt is None:
            prompts = [self.default_transcription_prompt] * batch_size
        elif isinstance(prompt, str):
            prompts = [prompt] * batch_size
        elif isinstance(prompt, (list, tuple)):
            if len(prompt) != batch_size:
                raise ValueError(
                    f"Received {len(prompt)} prompt(s) for {batch_size} audio sample(s); counts must match."
                )
            prompts = []
            for item in prompt:
                if item is None:
                    prompts.append(self.default_transcription_prompt)
                elif isinstance(item, str):
                    prompts.append(item)
                else:
                    raise TypeError("Each prompt must be a string or `None`.")
        else:
            raise TypeError("`prompt` must be a string, a sequence of strings, or `None`.")

        conversations = [
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "audio", "path": audio_item}
                        if isinstance(audio_item, str)
                        else {"type": "audio", "audio": audio_item},
                        {"type": "text", "text": prompt_text},
                    ],
                }
            ]
            for prompt_text, audio_item in zip(prompts, audio_items)
        ]

        return self.apply_chat_template(
            conversations,
            tokenize=True,
            add_generation_prompt=True,
            return_dict=True,
            **kwargs,
        )


class GlmAsrRotaryEmbedding(GlmRotaryEmbedding): ...


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    # Apply rotary embeddings on the first half or full tensor
    q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
    k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)

    # Concatenate back to full shape
    q_embed = torch.cat([q_embed, q_pass], dim=-1)
    k_embed = torch.cat([k_embed, k_pass], dim=-1)
    return q_embed, k_embed


class GlmAsrAttention(LlamaAttention):
    def __init__(self, config: GlmAsrConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        self.is_causal = False
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=True)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask=None,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class GlmAsrMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.Tensor):
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class GlmAsrEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GlmAsrConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = GlmAsrAttention(config=config, layer_idx=layer_idx)

        self.mlp = GlmAsrMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Self Attention
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


class GlmAsrPreTrainedModel(AudioFlamingo3PreTrainedModel): ...


# TODO: @eustlb, this is what WhisperEncoder should look like
class GlmAsrEncoder(GlmAsrPreTrainedModel):
    config: GlmAsrEncoderConfig
    main_input_name = "input_features"
    input_modalities = "audio"
    _no_split_modules = ["GlmAsrEncoderLayer"]

    def __init__(self, config: GlmAsrEncoderConfig):
        super().__init__(config)
        self.conv1 = nn.Conv1d(config.num_mel_bins, config.hidden_size, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(config.hidden_size, config.hidden_size, kernel_size=3, stride=2, padding=1)

        self.layers = nn.ModuleList(
            [GlmAsrEncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = nn.LayerNorm(config.hidden_size)
        self.rotary_emb = GlmAsrRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.post_init()

    @check_model_inputs
    @auto_docstring
    def forward(self, input_features, **kwargs: Unpack[TransformersKwargs]):
        inputs_embeds = nn.functional.gelu(self.conv1(input_features))
        inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds))
        inputs_embeds = inputs_embeds.transpose(1, 2)

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(
            hidden_states, position_ids=torch.arange(hidden_states.shape[1], device=hidden_states.device)[None, :]
        )

        for encoder_layer in self.layers:
            hidden_states = encoder_layer(hidden_states, position_embeddings=position_embeddings, **kwargs)

        hidden_states = self.norm(hidden_states)
        return BaseModelOutput(last_hidden_state=hidden_states)


class GlmAsrMultiModalProjector(AudioFlamingo3MultiModalProjector):
    def __init__(self, config: GlmAsrConfig):
        super().__init__()
        self.linear_1 = nn.Linear(config.audio_config.intermediate_size, config.text_config.hidden_size * 2)
        self.linear_2 = nn.Linear(config.text_config.hidden_size * 2, config.text_config.hidden_size)


@auto_docstring(
    custom_intro="""
    The GlmAsr model which consists of a fine-tuned Whisper encoder, a multi-modal projector and a Llama language model.
    """
)
class GlmAsrForConditionalGeneration(AudioFlamingo3ForConditionalGeneration):
    def get_audio_features(
        self, input_features: torch.FloatTensor, input_features_mask: torch.Tensor
    ) -> torch.FloatTensor:
        audio_outputs = self.audio_tower(input_features)
        audio_hidden_states = audio_outputs.last_hidden_state
        audio_hidden_states = audio_hidden_states.reshape(
            input_features.shape[0], -1, self.config.audio_config.intermediate_size
        )
        audio_embeds = self.multi_modal_projector(audio_hidden_states)

        audio_lengths = input_features_mask.sum(-1)
        for padding, kernel_size, stride in [(1, 3, 1), (1, 3, 2)]:
            audio_lengths = (audio_lengths + 2 * padding - (kernel_size - 1) - 1) // stride + 1
        merge_factor = 4
        post_lengths = (audio_lengths - merge_factor) // merge_factor + 1

        valid_mask = torch.arange(audio_embeds.shape[1], device=post_lengths.device)[None, :] < post_lengths[:, None]
        audio_embeds = audio_embeds[valid_mask.to(audio_embeds.device)]
        return audio_embeds

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        input_features: Optional[torch.FloatTensor] = None,
        input_features_mask: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> CausalLMOutputWithPast:
        r"""
        input_features_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`):
            Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import GlmAsrForConditionalGeneration, AutoProcessor

        >>> model_id = "zai-org/GLM-ASR-Nano-2512"
        >>> processor = AutoProcessor.from_pretrained(model_id)
        >>> model = GlmAsrForConditionalGeneration.from_pretrained(model_id, dtype="auto", device_map="auto")
        >>> inputs = processor.apply_transcription_request("https://huggingface.co/datasets/hf-internal-testing/dummy-audio-samples/resolve/main/bcn_weather.mp3")

        >>> inputs = inputs.to(model.device, dtype=model.dtype)

        >>> outputs = model.generate(**inputs, do_sample=False, max_new_tokens=500)

        >>> decoded_outputs = processor.batch_decode(outputs[:, inputs.input_ids.shape[1] :], skip_special_tokens=True)
        >>> print(decoded_outputs)
        ```"""
        return super().forward(
            input_ids=input_ids,
            input_features=input_features,
            input_features_mask=input_features_mask,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )


__all__ = ["GlmAsrEncoder", "GlmAsrForConditionalGeneration", "GlmAsrProcessor", "GlmAsrPreTrainedModel"]