transformers 5.0.0-py3-none-any.whl → 5.0.0rc0-py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their public registry.
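For readers who want to reproduce a per-file `+added -removed` summary like the listing below, here is a minimal sketch that unified-diffs the Python sources inside two locally downloaded wheels. It is an approximation of what the diff service computes, not its actual tooling; the wheel filenames are assumptions, so point them at whatever artifacts you fetched (e.g. with `pip download transformers==5.0.0rc0`).

```python
# Approximate reconstruction of a "+added -removed" per-file summary from two
# wheels. NOTE: the wheel paths below are assumptions, not the diff service's
# own inputs; adjust them to your local files.
import difflib
import zipfile

OLD_WHEEL = "transformers-5.0.0-py3-none-any.whl"     # assumed local path
NEW_WHEEL = "transformers-5.0.0rc0-py3-none-any.whl"  # assumed local path


def python_sources(wheel_path: str) -> dict[str, list[str]]:
    """Map each .py member of the wheel to its decoded source lines."""
    with zipfile.ZipFile(wheel_path) as zf:
        return {
            name: zf.read(name).decode("utf-8", errors="replace").splitlines()
            for name in zf.namelist()
            if name.endswith(".py")
        }


old, new = python_sources(OLD_WHEEL), python_sources(NEW_WHEEL)
for name in sorted(set(old) | set(new)):
    added = removed = 0
    # Count content lines in the unified diff, skipping the +++/--- headers.
    for line in difflib.unified_diff(old.get(name, []), new.get(name, []), lineterm=""):
        if line.startswith("+") and not line.startswith("+++"):
            added += 1
        elif line.startswith("-") and not line.startswith("---"):
            removed += 1
    if added or removed:
        print(f"- {name} +{added} -{removed}")
```

Counts from a plain unified diff will not match the listing exactly (the service also tracks renames, shown below in git's `{old → new}` notation), but the shape of the output is the same.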
- transformers/__init__.py +36 -55
- transformers/activations.py +1 -1
- transformers/audio_utils.py +33 -32
- transformers/cache_utils.py +139 -32
- transformers/cli/chat.py +3 -3
- transformers/cli/serve.py +19 -49
- transformers/cli/transformers.py +1 -2
- transformers/configuration_utils.py +155 -129
- transformers/conversion_mapping.py +22 -158
- transformers/convert_slow_tokenizer.py +17 -227
- transformers/core_model_loading.py +185 -528
- transformers/data/data_collator.py +4 -12
- transformers/data/processors/glue.py +1 -0
- transformers/data/processors/utils.py +1 -0
- transformers/data/processors/xnli.py +1 -0
- transformers/dependency_versions_check.py +1 -0
- transformers/dependency_versions_table.py +7 -5
- transformers/distributed/configuration_utils.py +2 -1
- transformers/dynamic_module_utils.py +25 -24
- transformers/feature_extraction_sequence_utils.py +23 -19
- transformers/feature_extraction_utils.py +33 -64
- transformers/file_utils.py +1 -0
- transformers/generation/__init__.py +1 -11
- transformers/generation/candidate_generator.py +33 -80
- transformers/generation/configuration_utils.py +133 -189
- transformers/generation/continuous_batching/__init__.py +1 -4
- transformers/generation/continuous_batching/cache.py +25 -83
- transformers/generation/continuous_batching/cache_manager.py +45 -155
- transformers/generation/continuous_batching/continuous_api.py +147 -270
- transformers/generation/continuous_batching/requests.py +3 -51
- transformers/generation/continuous_batching/scheduler.py +105 -160
- transformers/generation/logits_process.py +128 -0
- transformers/generation/stopping_criteria.py +1 -1
- transformers/generation/streamers.py +1 -0
- transformers/generation/utils.py +123 -122
- transformers/generation/watermarking.py +6 -8
- transformers/hf_argparser.py +13 -9
- transformers/hyperparameter_search.py +2 -1
- transformers/image_processing_base.py +23 -12
- transformers/image_processing_utils.py +15 -11
- transformers/image_processing_utils_fast.py +75 -85
- transformers/image_transforms.py +42 -73
- transformers/image_utils.py +32 -30
- transformers/initialization.py +0 -37
- transformers/integrations/__init__.py +2 -16
- transformers/integrations/accelerate.py +113 -58
- transformers/integrations/aqlm.py +66 -36
- transformers/integrations/awq.py +516 -45
- transformers/integrations/bitnet.py +105 -47
- transformers/integrations/bitsandbytes.py +202 -91
- transformers/integrations/deepspeed.py +4 -161
- transformers/integrations/eetq.py +82 -84
- transformers/integrations/executorch.py +1 -1
- transformers/integrations/fbgemm_fp8.py +145 -190
- transformers/integrations/finegrained_fp8.py +215 -249
- transformers/integrations/flash_attention.py +3 -3
- transformers/integrations/flex_attention.py +1 -1
- transformers/integrations/fp_quant.py +0 -90
- transformers/integrations/ggml.py +2 -11
- transformers/integrations/higgs.py +62 -37
- transformers/integrations/hub_kernels.py +8 -65
- transformers/integrations/integration_utils.py +3 -47
- transformers/integrations/mistral.py +0 -12
- transformers/integrations/mxfp4.py +80 -33
- transformers/integrations/peft.py +191 -483
- transformers/integrations/quanto.py +56 -77
- transformers/integrations/spqr.py +90 -42
- transformers/integrations/tensor_parallel.py +221 -167
- transformers/integrations/torchao.py +43 -35
- transformers/integrations/vptq.py +59 -40
- transformers/kernels/__init__.py +0 -0
- transformers/{models/pe_audio_video/processing_pe_audio_video.py → kernels/falcon_mamba/__init__.py} +3 -12
- transformers/kernels/falcon_mamba/selective_scan_with_ln_interface.py +529 -0
- transformers/loss/loss_utils.py +0 -2
- transformers/masking_utils.py +55 -51
- transformers/model_debugging_utils.py +5 -4
- transformers/modelcard.py +194 -15
- transformers/modeling_attn_mask_utils.py +19 -19
- transformers/modeling_flash_attention_utils.py +27 -27
- transformers/modeling_gguf_pytorch_utils.py +24 -79
- transformers/modeling_layers.py +22 -21
- transformers/modeling_outputs.py +253 -242
- transformers/modeling_rope_utils.py +117 -138
- transformers/modeling_utils.py +739 -850
- transformers/models/__init__.py +0 -27
- transformers/models/afmoe/configuration_afmoe.py +33 -40
- transformers/models/afmoe/modeling_afmoe.py +54 -42
- transformers/models/afmoe/modular_afmoe.py +33 -23
- transformers/models/aimv2/configuration_aimv2.py +10 -2
- transformers/models/aimv2/modeling_aimv2.py +42 -47
- transformers/models/aimv2/modular_aimv2.py +19 -17
- transformers/models/albert/configuration_albert.py +2 -8
- transformers/models/albert/modeling_albert.py +69 -70
- transformers/models/albert/tokenization_albert.py +14 -5
- transformers/models/align/configuration_align.py +6 -8
- transformers/models/align/modeling_align.py +89 -94
- transformers/models/align/processing_align.py +30 -2
- transformers/models/altclip/configuration_altclip.py +7 -4
- transformers/models/altclip/modeling_altclip.py +103 -114
- transformers/models/altclip/processing_altclip.py +15 -2
- transformers/models/apertus/__init__.py +1 -0
- transformers/models/apertus/configuration_apertus.py +28 -23
- transformers/models/apertus/modeling_apertus.py +40 -39
- transformers/models/apertus/modular_apertus.py +38 -37
- transformers/models/arcee/configuration_arcee.py +30 -25
- transformers/models/arcee/modeling_arcee.py +39 -36
- transformers/models/arcee/modular_arcee.py +23 -20
- transformers/models/aria/configuration_aria.py +44 -31
- transformers/models/aria/image_processing_aria.py +27 -25
- transformers/models/aria/modeling_aria.py +106 -110
- transformers/models/aria/modular_aria.py +127 -118
- transformers/models/aria/processing_aria.py +35 -28
- transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +1 -0
- transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py +6 -3
- transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +8 -6
- transformers/models/audioflamingo3/__init__.py +1 -0
- transformers/models/audioflamingo3/configuration_audioflamingo3.py +1 -0
- transformers/models/audioflamingo3/modeling_audioflamingo3.py +49 -58
- transformers/models/audioflamingo3/modular_audioflamingo3.py +43 -53
- transformers/models/audioflamingo3/processing_audioflamingo3.py +30 -33
- transformers/models/auto/auto_factory.py +7 -6
- transformers/models/auto/configuration_auto.py +5 -66
- transformers/models/auto/feature_extraction_auto.py +10 -14
- transformers/models/auto/image_processing_auto.py +41 -32
- transformers/models/auto/modeling_auto.py +188 -46
- transformers/models/auto/processing_auto.py +11 -24
- transformers/models/auto/tokenization_auto.py +588 -171
- transformers/models/auto/video_processing_auto.py +10 -12
- transformers/models/autoformer/configuration_autoformer.py +7 -4
- transformers/models/autoformer/modeling_autoformer.py +101 -104
- transformers/models/aya_vision/configuration_aya_vision.py +1 -4
- transformers/models/aya_vision/modeling_aya_vision.py +102 -71
- transformers/models/aya_vision/modular_aya_vision.py +74 -46
- transformers/models/aya_vision/processing_aya_vision.py +53 -25
- transformers/models/bamba/configuration_bamba.py +39 -34
- transformers/models/bamba/modeling_bamba.py +86 -82
- transformers/models/bamba/modular_bamba.py +72 -70
- transformers/models/bark/configuration_bark.py +8 -6
- transformers/models/bark/generation_configuration_bark.py +5 -3
- transformers/models/bark/modeling_bark.py +57 -54
- transformers/models/bark/processing_bark.py +41 -19
- transformers/models/bart/configuration_bart.py +6 -9
- transformers/models/bart/modeling_bart.py +126 -135
- transformers/models/barthez/tokenization_barthez.py +11 -3
- transformers/models/bartpho/tokenization_bartpho.py +7 -6
- transformers/models/beit/configuration_beit.py +11 -0
- transformers/models/beit/image_processing_beit.py +56 -53
- transformers/models/beit/image_processing_beit_fast.py +12 -10
- transformers/models/beit/modeling_beit.py +60 -69
- transformers/models/bert/configuration_bert.py +2 -12
- transformers/models/bert/modeling_bert.py +122 -114
- transformers/models/bert/tokenization_bert.py +23 -8
- transformers/models/bert/tokenization_bert_legacy.py +5 -3
- transformers/models/bert_generation/configuration_bert_generation.py +2 -17
- transformers/models/bert_generation/modeling_bert_generation.py +49 -49
- transformers/models/bert_generation/tokenization_bert_generation.py +3 -2
- transformers/models/bert_japanese/tokenization_bert_japanese.py +6 -5
- transformers/models/bertweet/tokenization_bertweet.py +3 -1
- transformers/models/big_bird/configuration_big_bird.py +9 -12
- transformers/models/big_bird/modeling_big_bird.py +109 -116
- transformers/models/big_bird/tokenization_big_bird.py +43 -16
- transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +9 -9
- transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +117 -130
- transformers/models/biogpt/configuration_biogpt.py +2 -8
- transformers/models/biogpt/modeling_biogpt.py +76 -72
- transformers/models/biogpt/modular_biogpt.py +66 -62
- transformers/models/biogpt/tokenization_biogpt.py +5 -3
- transformers/models/bit/configuration_bit.py +1 -0
- transformers/models/bit/image_processing_bit.py +24 -21
- transformers/models/bit/image_processing_bit_fast.py +1 -0
- transformers/models/bit/modeling_bit.py +12 -25
- transformers/models/bitnet/configuration_bitnet.py +28 -23
- transformers/models/bitnet/modeling_bitnet.py +39 -36
- transformers/models/bitnet/modular_bitnet.py +6 -4
- transformers/models/blenderbot/configuration_blenderbot.py +5 -8
- transformers/models/blenderbot/modeling_blenderbot.py +96 -77
- transformers/models/blenderbot/tokenization_blenderbot.py +24 -18
- transformers/models/blenderbot_small/configuration_blenderbot_small.py +5 -8
- transformers/models/blenderbot_small/modeling_blenderbot_small.py +69 -79
- transformers/models/blenderbot_small/tokenization_blenderbot_small.py +3 -1
- transformers/models/blip/configuration_blip.py +10 -9
- transformers/models/blip/image_processing_blip.py +20 -17
- transformers/models/blip/image_processing_blip_fast.py +1 -0
- transformers/models/blip/modeling_blip.py +108 -117
- transformers/models/blip/modeling_blip_text.py +65 -73
- transformers/models/blip/processing_blip.py +36 -5
- transformers/models/blip_2/configuration_blip_2.py +2 -2
- transformers/models/blip_2/modeling_blip_2.py +118 -146
- transformers/models/blip_2/processing_blip_2.py +38 -8
- transformers/models/bloom/configuration_bloom.py +2 -5
- transformers/models/bloom/modeling_bloom.py +104 -77
- transformers/models/blt/configuration_blt.py +86 -94
- transformers/models/blt/modeling_blt.py +81 -238
- transformers/models/blt/modular_blt.py +65 -228
- transformers/models/bridgetower/configuration_bridgetower.py +2 -7
- transformers/models/bridgetower/image_processing_bridgetower.py +35 -34
- transformers/models/bridgetower/image_processing_bridgetower_fast.py +16 -13
- transformers/models/bridgetower/modeling_bridgetower.py +119 -141
- transformers/models/bridgetower/processing_bridgetower.py +16 -2
- transformers/models/bros/configuration_bros.py +18 -24
- transformers/models/bros/modeling_bros.py +80 -90
- transformers/models/bros/processing_bros.py +12 -2
- transformers/models/byt5/tokenization_byt5.py +6 -4
- transformers/models/camembert/configuration_camembert.py +2 -8
- transformers/models/camembert/modeling_camembert.py +195 -196
- transformers/models/camembert/modular_camembert.py +54 -51
- transformers/models/camembert/tokenization_camembert.py +13 -6
- transformers/models/canine/configuration_canine.py +2 -4
- transformers/models/canine/modeling_canine.py +75 -84
- transformers/models/canine/tokenization_canine.py +1 -2
- transformers/models/chameleon/configuration_chameleon.py +34 -29
- transformers/models/chameleon/image_processing_chameleon.py +24 -21
- transformers/models/chameleon/image_processing_chameleon_fast.py +6 -5
- transformers/models/chameleon/modeling_chameleon.py +93 -142
- transformers/models/chameleon/processing_chameleon.py +41 -16
- transformers/models/chinese_clip/configuration_chinese_clip.py +8 -10
- transformers/models/chinese_clip/image_processing_chinese_clip.py +24 -21
- transformers/models/chinese_clip/image_processing_chinese_clip_fast.py +1 -0
- transformers/models/chinese_clip/modeling_chinese_clip.py +92 -96
- transformers/models/chinese_clip/processing_chinese_clip.py +15 -2
- transformers/models/clap/configuration_clap.py +9 -4
- transformers/models/clap/feature_extraction_clap.py +12 -11
- transformers/models/clap/modeling_clap.py +123 -136
- transformers/models/clap/processing_clap.py +15 -2
- transformers/models/clip/configuration_clip.py +2 -4
- transformers/models/clip/image_processing_clip.py +24 -21
- transformers/models/clip/image_processing_clip_fast.py +1 -9
- transformers/models/clip/modeling_clip.py +65 -65
- transformers/models/clip/processing_clip.py +14 -2
- transformers/models/clip/tokenization_clip.py +46 -21
- transformers/models/clipseg/configuration_clipseg.py +2 -4
- transformers/models/clipseg/modeling_clipseg.py +109 -119
- transformers/models/clipseg/processing_clipseg.py +42 -19
- transformers/models/clvp/configuration_clvp.py +5 -15
- transformers/models/clvp/feature_extraction_clvp.py +10 -7
- transformers/models/clvp/modeling_clvp.py +146 -155
- transformers/models/clvp/number_normalizer.py +2 -1
- transformers/models/clvp/processing_clvp.py +20 -3
- transformers/models/clvp/tokenization_clvp.py +64 -1
- transformers/models/code_llama/tokenization_code_llama.py +44 -18
- transformers/models/codegen/configuration_codegen.py +4 -4
- transformers/models/codegen/modeling_codegen.py +53 -63
- transformers/models/codegen/tokenization_codegen.py +47 -17
- transformers/models/cohere/configuration_cohere.py +30 -25
- transformers/models/cohere/modeling_cohere.py +42 -40
- transformers/models/cohere/modular_cohere.py +29 -26
- transformers/models/cohere/tokenization_cohere.py +46 -15
- transformers/models/cohere2/configuration_cohere2.py +32 -31
- transformers/models/cohere2/modeling_cohere2.py +44 -42
- transformers/models/cohere2/modular_cohere2.py +54 -54
- transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +14 -13
- transformers/models/cohere2_vision/modeling_cohere2_vision.py +58 -59
- transformers/models/cohere2_vision/modular_cohere2_vision.py +46 -45
- transformers/models/cohere2_vision/processing_cohere2_vision.py +36 -6
- transformers/models/colpali/configuration_colpali.py +1 -0
- transformers/models/colpali/modeling_colpali.py +16 -14
- transformers/models/colpali/modular_colpali.py +51 -11
- transformers/models/colpali/processing_colpali.py +52 -14
- transformers/models/colqwen2/modeling_colqwen2.py +28 -28
- transformers/models/colqwen2/modular_colqwen2.py +74 -37
- transformers/models/colqwen2/processing_colqwen2.py +52 -16
- transformers/models/conditional_detr/configuration_conditional_detr.py +2 -1
- transformers/models/conditional_detr/image_processing_conditional_detr.py +70 -67
- transformers/models/conditional_detr/image_processing_conditional_detr_fast.py +36 -36
- transformers/models/conditional_detr/modeling_conditional_detr.py +87 -99
- transformers/models/conditional_detr/modular_conditional_detr.py +3 -49
- transformers/models/convbert/configuration_convbert.py +8 -11
- transformers/models/convbert/modeling_convbert.py +87 -94
- transformers/models/convbert/tokenization_convbert.py +1 -0
- transformers/models/convnext/configuration_convnext.py +1 -0
- transformers/models/convnext/image_processing_convnext.py +23 -20
- transformers/models/convnext/image_processing_convnext_fast.py +21 -16
- transformers/models/convnext/modeling_convnext.py +12 -9
- transformers/models/convnextv2/configuration_convnextv2.py +1 -0
- transformers/models/convnextv2/modeling_convnextv2.py +12 -9
- transformers/models/cpm/tokenization_cpm.py +7 -6
- transformers/models/cpm/tokenization_cpm_fast.py +5 -3
- transformers/models/cpmant/configuration_cpmant.py +1 -4
- transformers/models/cpmant/modeling_cpmant.py +40 -38
- transformers/models/cpmant/tokenization_cpmant.py +3 -1
- transformers/models/csm/configuration_csm.py +66 -58
- transformers/models/csm/generation_csm.py +35 -31
- transformers/models/csm/modeling_csm.py +85 -85
- transformers/models/csm/modular_csm.py +58 -58
- transformers/models/csm/processing_csm.py +68 -25
- transformers/models/ctrl/configuration_ctrl.py +1 -16
- transformers/models/ctrl/modeling_ctrl.py +44 -54
- transformers/models/ctrl/tokenization_ctrl.py +1 -0
- transformers/models/cvt/configuration_cvt.py +1 -0
- transformers/models/cvt/modeling_cvt.py +16 -20
- transformers/models/cwm/__init__.py +1 -0
- transformers/models/cwm/configuration_cwm.py +12 -8
- transformers/models/cwm/modeling_cwm.py +39 -37
- transformers/models/cwm/modular_cwm.py +12 -10
- transformers/models/d_fine/configuration_d_fine.py +5 -7
- transformers/models/d_fine/modeling_d_fine.py +128 -138
- transformers/models/d_fine/modular_d_fine.py +18 -33
- transformers/models/dab_detr/configuration_dab_detr.py +3 -6
- transformers/models/dab_detr/modeling_dab_detr.py +75 -81
- transformers/models/dac/configuration_dac.py +1 -0
- transformers/models/dac/feature_extraction_dac.py +9 -6
- transformers/models/dac/modeling_dac.py +26 -24
- transformers/models/data2vec/configuration_data2vec_audio.py +2 -4
- transformers/models/data2vec/configuration_data2vec_text.py +3 -11
- transformers/models/data2vec/configuration_data2vec_vision.py +1 -0
- transformers/models/data2vec/modeling_data2vec_audio.py +56 -57
- transformers/models/data2vec/modeling_data2vec_text.py +93 -98
- transformers/models/data2vec/modeling_data2vec_vision.py +45 -49
- transformers/models/data2vec/modular_data2vec_audio.py +1 -6
- transformers/models/data2vec/modular_data2vec_text.py +54 -58
- transformers/models/dbrx/configuration_dbrx.py +22 -36
- transformers/models/dbrx/modeling_dbrx.py +45 -42
- transformers/models/dbrx/modular_dbrx.py +33 -31
- transformers/models/deberta/configuration_deberta.py +1 -6
- transformers/models/deberta/modeling_deberta.py +60 -64
- transformers/models/deberta/tokenization_deberta.py +21 -9
- transformers/models/deberta_v2/configuration_deberta_v2.py +1 -6
- transformers/models/deberta_v2/modeling_deberta_v2.py +65 -71
- transformers/models/deberta_v2/tokenization_deberta_v2.py +29 -11
- transformers/models/decision_transformer/configuration_decision_transformer.py +2 -3
- transformers/models/decision_transformer/modeling_decision_transformer.py +56 -60
- transformers/models/deepseek_v2/configuration_deepseek_v2.py +44 -39
- transformers/models/deepseek_v2/modeling_deepseek_v2.py +43 -43
- transformers/models/deepseek_v2/modular_deepseek_v2.py +49 -48
- transformers/models/deepseek_v3/configuration_deepseek_v3.py +45 -40
- transformers/models/deepseek_v3/modeling_deepseek_v3.py +42 -45
- transformers/models/deepseek_v3/modular_deepseek_v3.py +9 -14
- transformers/models/deepseek_vl/configuration_deepseek_vl.py +3 -2
- transformers/models/deepseek_vl/image_processing_deepseek_vl.py +26 -25
- transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py +10 -10
- transformers/models/deepseek_vl/modeling_deepseek_vl.py +48 -57
- transformers/models/deepseek_vl/modular_deepseek_vl.py +43 -14
- transformers/models/deepseek_vl/processing_deepseek_vl.py +41 -10
- transformers/models/deepseek_vl_hybrid/configuration_deepseek_vl_hybrid.py +5 -3
- transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py +35 -35
- transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py +24 -20
- transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py +61 -109
- transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py +118 -146
- transformers/models/deepseek_vl_hybrid/processing_deepseek_vl_hybrid.py +44 -12
- transformers/models/deformable_detr/configuration_deformable_detr.py +3 -2
- transformers/models/deformable_detr/image_processing_deformable_detr.py +61 -59
- transformers/models/deformable_detr/image_processing_deformable_detr_fast.py +28 -28
- transformers/models/deformable_detr/modeling_deformable_detr.py +82 -88
- transformers/models/deformable_detr/modular_deformable_detr.py +3 -1
- transformers/models/deit/configuration_deit.py +1 -0
- transformers/models/deit/image_processing_deit.py +21 -18
- transformers/models/deit/image_processing_deit_fast.py +1 -0
- transformers/models/deit/modeling_deit.py +22 -24
- transformers/models/depth_anything/configuration_depth_anything.py +4 -2
- transformers/models/depth_anything/modeling_depth_anything.py +10 -10
- transformers/models/depth_pro/configuration_depth_pro.py +1 -0
- transformers/models/depth_pro/image_processing_depth_pro.py +23 -22
- transformers/models/depth_pro/image_processing_depth_pro_fast.py +10 -8
- transformers/models/depth_pro/modeling_depth_pro.py +27 -31
- transformers/models/detr/configuration_detr.py +2 -1
- transformers/models/detr/image_processing_detr.py +66 -64
- transformers/models/detr/image_processing_detr_fast.py +34 -33
- transformers/models/detr/modeling_detr.py +79 -95
- transformers/models/dia/configuration_dia.py +15 -9
- transformers/models/dia/feature_extraction_dia.py +9 -6
- transformers/models/dia/generation_dia.py +50 -48
- transformers/models/dia/modeling_dia.py +69 -78
- transformers/models/dia/modular_dia.py +56 -64
- transformers/models/dia/processing_dia.py +29 -39
- transformers/models/dia/tokenization_dia.py +6 -3
- transformers/models/diffllama/configuration_diffllama.py +30 -25
- transformers/models/diffllama/modeling_diffllama.py +49 -46
- transformers/models/diffllama/modular_diffllama.py +19 -17
- transformers/models/dinat/configuration_dinat.py +1 -0
- transformers/models/dinat/modeling_dinat.py +44 -47
- transformers/models/dinov2/configuration_dinov2.py +1 -0
- transformers/models/dinov2/modeling_dinov2.py +15 -15
- transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py +1 -1
- transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py +15 -16
- transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py +9 -9
- transformers/models/dinov3_convnext/configuration_dinov3_convnext.py +7 -4
- transformers/models/dinov3_convnext/modeling_dinov3_convnext.py +6 -3
- transformers/models/dinov3_vit/configuration_dinov3_vit.py +8 -5
- transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py +9 -7
- transformers/models/dinov3_vit/modeling_dinov3_vit.py +18 -19
- transformers/models/dinov3_vit/modular_dinov3_vit.py +15 -16
- transformers/models/distilbert/configuration_distilbert.py +2 -8
- transformers/models/distilbert/modeling_distilbert.py +55 -55
- transformers/models/distilbert/tokenization_distilbert.py +1 -13
- transformers/models/doge/__init__.py +1 -0
- transformers/models/doge/configuration_doge.py +32 -39
- transformers/models/doge/modeling_doge.py +49 -45
- transformers/models/doge/modular_doge.py +63 -71
- transformers/models/donut/configuration_donut_swin.py +1 -0
- transformers/models/donut/image_processing_donut.py +29 -26
- transformers/models/donut/image_processing_donut_fast.py +15 -9
- transformers/models/donut/modeling_donut_swin.py +58 -62
- transformers/models/donut/processing_donut.py +26 -5
- transformers/models/dots1/configuration_dots1.py +33 -41
- transformers/models/dots1/modeling_dots1.py +45 -54
- transformers/models/dots1/modular_dots1.py +4 -5
- transformers/models/dpr/configuration_dpr.py +2 -19
- transformers/models/dpr/modeling_dpr.py +39 -42
- transformers/models/dpr/tokenization_dpr.py +9 -19
- transformers/models/dpr/tokenization_dpr_fast.py +9 -7
- transformers/models/dpt/configuration_dpt.py +2 -1
- transformers/models/dpt/image_processing_dpt.py +66 -65
- transformers/models/dpt/image_processing_dpt_fast.py +20 -18
- transformers/models/dpt/modeling_dpt.py +30 -32
- transformers/models/dpt/modular_dpt.py +17 -15
- transformers/models/edgetam/configuration_edgetam.py +3 -2
- transformers/models/edgetam/modeling_edgetam.py +86 -86
- transformers/models/edgetam/modular_edgetam.py +26 -21
- transformers/models/edgetam_video/__init__.py +1 -0
- transformers/models/edgetam_video/configuration_edgetam_video.py +1 -0
- transformers/models/edgetam_video/modeling_edgetam_video.py +158 -169
- transformers/models/edgetam_video/modular_edgetam_video.py +37 -30
- transformers/models/efficientloftr/configuration_efficientloftr.py +5 -4
- transformers/models/efficientloftr/image_processing_efficientloftr.py +16 -14
- transformers/models/efficientloftr/image_processing_efficientloftr_fast.py +9 -9
- transformers/models/efficientloftr/modeling_efficientloftr.py +38 -59
- transformers/models/efficientloftr/modular_efficientloftr.py +3 -1
- transformers/models/efficientnet/configuration_efficientnet.py +1 -0
- transformers/models/efficientnet/image_processing_efficientnet.py +32 -28
- transformers/models/efficientnet/image_processing_efficientnet_fast.py +19 -17
- transformers/models/efficientnet/modeling_efficientnet.py +15 -19
- transformers/models/electra/configuration_electra.py +3 -13
- transformers/models/electra/modeling_electra.py +103 -108
- transformers/models/emu3/configuration_emu3.py +17 -13
- transformers/models/emu3/image_processing_emu3.py +39 -44
- transformers/models/emu3/modeling_emu3.py +108 -148
- transformers/models/emu3/modular_emu3.py +73 -115
- transformers/models/emu3/processing_emu3.py +43 -18
- transformers/models/encodec/configuration_encodec.py +4 -2
- transformers/models/encodec/feature_extraction_encodec.py +13 -10
- transformers/models/encodec/modeling_encodec.py +29 -39
- transformers/models/encoder_decoder/configuration_encoder_decoder.py +2 -12
- transformers/models/encoder_decoder/modeling_encoder_decoder.py +43 -37
- transformers/models/eomt/configuration_eomt.py +1 -0
- transformers/models/eomt/image_processing_eomt.py +56 -66
- transformers/models/eomt/image_processing_eomt_fast.py +33 -76
- transformers/models/eomt/modeling_eomt.py +18 -23
- transformers/models/eomt/modular_eomt.py +13 -18
- transformers/models/ernie/configuration_ernie.py +3 -24
- transformers/models/ernie/modeling_ernie.py +132 -127
- transformers/models/ernie/modular_ernie.py +103 -97
- transformers/models/ernie4_5/configuration_ernie4_5.py +27 -23
- transformers/models/ernie4_5/modeling_ernie4_5.py +38 -36
- transformers/models/ernie4_5/modular_ernie4_5.py +4 -3
- transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py +36 -32
- transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +55 -56
- transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py +46 -18
- transformers/models/esm/configuration_esm.py +15 -11
- transformers/models/esm/modeling_esm.py +34 -38
- transformers/models/esm/modeling_esmfold.py +49 -53
- transformers/models/esm/openfold_utils/chunk_utils.py +6 -6
- transformers/models/esm/openfold_utils/loss.py +2 -1
- transformers/models/esm/openfold_utils/protein.py +16 -15
- transformers/models/esm/openfold_utils/tensor_utils.py +6 -6
- transformers/models/esm/tokenization_esm.py +4 -2
- transformers/models/evolla/configuration_evolla.py +40 -50
- transformers/models/evolla/modeling_evolla.py +66 -71
- transformers/models/evolla/modular_evolla.py +47 -53
- transformers/models/evolla/processing_evolla.py +35 -23
- transformers/models/exaone4/configuration_exaone4.py +25 -23
- transformers/models/exaone4/modeling_exaone4.py +38 -35
- transformers/models/exaone4/modular_exaone4.py +46 -44
- transformers/models/falcon/configuration_falcon.py +26 -31
- transformers/models/falcon/modeling_falcon.py +80 -82
- transformers/models/falcon_h1/configuration_falcon_h1.py +51 -45
- transformers/models/falcon_h1/modeling_falcon_h1.py +82 -85
- transformers/models/falcon_h1/modular_falcon_h1.py +51 -56
- transformers/models/falcon_mamba/configuration_falcon_mamba.py +2 -1
- transformers/models/falcon_mamba/modeling_falcon_mamba.py +82 -75
- transformers/models/falcon_mamba/modular_falcon_mamba.py +45 -28
- transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +6 -2
- transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +60 -76
- transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +3 -2
- transformers/models/flaubert/configuration_flaubert.py +5 -10
- transformers/models/flaubert/modeling_flaubert.py +143 -145
- transformers/models/flaubert/tokenization_flaubert.py +5 -3
- transformers/models/flava/configuration_flava.py +6 -5
- transformers/models/flava/image_processing_flava.py +67 -66
- transformers/models/flava/image_processing_flava_fast.py +49 -46
- transformers/models/flava/modeling_flava.py +136 -153
- transformers/models/flava/processing_flava.py +12 -2
- transformers/models/flex_olmo/__init__.py +1 -0
- transformers/models/flex_olmo/configuration_flex_olmo.py +32 -28
- transformers/models/flex_olmo/modeling_flex_olmo.py +47 -47
- transformers/models/flex_olmo/modular_flex_olmo.py +44 -40
- transformers/models/florence2/configuration_florence2.py +1 -0
- transformers/models/florence2/modeling_florence2.py +69 -111
- transformers/models/florence2/modular_florence2.py +101 -104
- transformers/models/florence2/processing_florence2.py +47 -18
- transformers/models/fnet/configuration_fnet.py +2 -6
- transformers/models/fnet/modeling_fnet.py +80 -83
- transformers/models/fnet/tokenization_fnet.py +1 -0
- transformers/models/focalnet/configuration_focalnet.py +1 -0
- transformers/models/focalnet/modeling_focalnet.py +45 -51
- transformers/models/fsmt/configuration_fsmt.py +17 -12
- transformers/models/fsmt/modeling_fsmt.py +48 -49
- transformers/models/fsmt/tokenization_fsmt.py +5 -3
- transformers/models/funnel/configuration_funnel.py +1 -8
- transformers/models/funnel/modeling_funnel.py +93 -99
- transformers/models/funnel/tokenization_funnel.py +27 -17
- transformers/models/fuyu/configuration_fuyu.py +34 -28
- transformers/models/fuyu/image_processing_fuyu.py +31 -29
- transformers/models/fuyu/image_processing_fuyu_fast.py +17 -17
- transformers/models/fuyu/modeling_fuyu.py +53 -53
- transformers/models/fuyu/processing_fuyu.py +34 -23
- transformers/models/gemma/configuration_gemma.py +30 -25
- transformers/models/gemma/modeling_gemma.py +50 -46
- transformers/models/gemma/modular_gemma.py +47 -42
- transformers/models/gemma/tokenization_gemma.py +30 -10
- transformers/models/gemma2/configuration_gemma2.py +35 -30
- transformers/models/gemma2/modeling_gemma2.py +42 -39
- transformers/models/gemma2/modular_gemma2.py +66 -63
- transformers/models/gemma3/configuration_gemma3.py +44 -44
- transformers/models/gemma3/image_processing_gemma3.py +31 -29
- transformers/models/gemma3/image_processing_gemma3_fast.py +13 -11
- transformers/models/gemma3/modeling_gemma3.py +207 -159
- transformers/models/gemma3/modular_gemma3.py +204 -153
- transformers/models/gemma3/processing_gemma3.py +5 -5
- transformers/models/gemma3n/configuration_gemma3n.py +26 -36
- transformers/models/gemma3n/feature_extraction_gemma3n.py +11 -9
- transformers/models/gemma3n/modeling_gemma3n.py +356 -222
- transformers/models/gemma3n/modular_gemma3n.py +207 -230
- transformers/models/gemma3n/processing_gemma3n.py +26 -12
- transformers/models/git/configuration_git.py +8 -5
- transformers/models/git/modeling_git.py +204 -266
- transformers/models/git/processing_git.py +14 -2
- transformers/models/glm/configuration_glm.py +28 -24
- transformers/models/glm/modeling_glm.py +40 -37
- transformers/models/glm/modular_glm.py +7 -4
- transformers/models/glm4/configuration_glm4.py +28 -24
- transformers/models/glm4/modeling_glm4.py +42 -40
- transformers/models/glm4/modular_glm4.py +10 -8
- transformers/models/glm46v/configuration_glm46v.py +1 -0
- transformers/models/glm46v/image_processing_glm46v.py +40 -35
- transformers/models/glm46v/image_processing_glm46v_fast.py +9 -9
- transformers/models/glm46v/modeling_glm46v.py +90 -137
- transformers/models/glm46v/modular_glm46v.py +3 -4
- transformers/models/glm46v/processing_glm46v.py +41 -7
- transformers/models/glm46v/video_processing_glm46v.py +11 -9
- transformers/models/glm4_moe/configuration_glm4_moe.py +32 -40
- transformers/models/glm4_moe/modeling_glm4_moe.py +42 -45
- transformers/models/glm4_moe/modular_glm4_moe.py +34 -42
- transformers/models/glm4v/configuration_glm4v.py +20 -18
- transformers/models/glm4v/image_processing_glm4v.py +40 -34
- transformers/models/glm4v/image_processing_glm4v_fast.py +9 -8
- transformers/models/glm4v/modeling_glm4v.py +205 -254
- transformers/models/glm4v/modular_glm4v.py +224 -210
- transformers/models/glm4v/processing_glm4v.py +41 -7
- transformers/models/glm4v/video_processing_glm4v.py +11 -9
- transformers/models/glm4v_moe/configuration_glm4v_moe.py +125 -136
- transformers/models/glm4v_moe/modeling_glm4v_moe.py +368 -377
- transformers/models/glm4v_moe/modular_glm4v_moe.py +169 -83
- transformers/models/glpn/configuration_glpn.py +1 -0
- transformers/models/glpn/image_processing_glpn.py +12 -11
- transformers/models/glpn/image_processing_glpn_fast.py +13 -11
- transformers/models/glpn/modeling_glpn.py +14 -16
- transformers/models/got_ocr2/configuration_got_ocr2.py +12 -4
- transformers/models/got_ocr2/image_processing_got_ocr2.py +24 -22
- transformers/models/got_ocr2/image_processing_got_ocr2_fast.py +11 -9
- transformers/models/got_ocr2/modeling_got_ocr2.py +80 -77
- transformers/models/got_ocr2/modular_got_ocr2.py +51 -54
- transformers/models/got_ocr2/processing_got_ocr2.py +63 -42
- transformers/models/gpt2/configuration_gpt2.py +2 -13
- transformers/models/gpt2/modeling_gpt2.py +115 -120
- transformers/models/gpt2/tokenization_gpt2.py +46 -15
- transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +2 -5
- transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +89 -79
- transformers/models/gpt_neo/configuration_gpt_neo.py +2 -9
- transformers/models/gpt_neo/modeling_gpt_neo.py +67 -83
- transformers/models/gpt_neox/configuration_gpt_neox.py +25 -25
- transformers/models/gpt_neox/modeling_gpt_neox.py +75 -76
- transformers/models/gpt_neox/modular_gpt_neox.py +66 -67
- transformers/models/gpt_neox/tokenization_gpt_neox.py +51 -9
- transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +19 -24
- transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +47 -46
- transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py +3 -1
- transformers/models/gpt_oss/configuration_gpt_oss.py +28 -46
- transformers/models/gpt_oss/modeling_gpt_oss.py +121 -83
- transformers/models/gpt_oss/modular_gpt_oss.py +103 -64
- transformers/models/gpt_sw3/tokenization_gpt_sw3.py +4 -4
- transformers/models/gptj/configuration_gptj.py +4 -4
- transformers/models/gptj/modeling_gptj.py +87 -101
- transformers/models/granite/configuration_granite.py +33 -28
- transformers/models/granite/modeling_granite.py +46 -44
- transformers/models/granite/modular_granite.py +31 -29
- transformers/models/granite_speech/configuration_granite_speech.py +1 -0
- transformers/models/granite_speech/feature_extraction_granite_speech.py +3 -1
- transformers/models/granite_speech/modeling_granite_speech.py +52 -82
- transformers/models/granite_speech/processing_granite_speech.py +4 -11
- transformers/models/granitemoe/configuration_granitemoe.py +36 -31
- transformers/models/granitemoe/modeling_granitemoe.py +46 -41
- transformers/models/granitemoe/modular_granitemoe.py +27 -22
- transformers/models/granitemoehybrid/__init__.py +1 -0
- transformers/models/granitemoehybrid/configuration_granitemoehybrid.py +47 -46
- transformers/models/granitemoehybrid/modeling_granitemoehybrid.py +93 -97
- transformers/models/granitemoehybrid/modular_granitemoehybrid.py +21 -54
- transformers/models/granitemoeshared/configuration_granitemoeshared.py +37 -33
- transformers/models/granitemoeshared/modeling_granitemoeshared.py +61 -54
- transformers/models/granitemoeshared/modular_granitemoeshared.py +21 -19
- transformers/models/grounding_dino/configuration_grounding_dino.py +4 -6
- transformers/models/grounding_dino/image_processing_grounding_dino.py +62 -60
- transformers/models/grounding_dino/image_processing_grounding_dino_fast.py +29 -28
- transformers/models/grounding_dino/modeling_grounding_dino.py +140 -155
- transformers/models/grounding_dino/modular_grounding_dino.py +3 -2
- transformers/models/grounding_dino/processing_grounding_dino.py +38 -10
- transformers/models/groupvit/configuration_groupvit.py +2 -4
- transformers/models/groupvit/modeling_groupvit.py +93 -107
- transformers/models/helium/configuration_helium.py +29 -25
- transformers/models/helium/modeling_helium.py +40 -38
- transformers/models/helium/modular_helium.py +7 -3
- transformers/models/herbert/tokenization_herbert.py +28 -10
- transformers/models/hgnet_v2/configuration_hgnet_v2.py +1 -0
- transformers/models/hgnet_v2/modeling_hgnet_v2.py +10 -24
- transformers/models/hgnet_v2/modular_hgnet_v2.py +10 -24
- transformers/models/hiera/configuration_hiera.py +1 -0
- transformers/models/hiera/modeling_hiera.py +66 -72
- transformers/models/hubert/configuration_hubert.py +2 -4
- transformers/models/hubert/modeling_hubert.py +37 -42
- transformers/models/hubert/modular_hubert.py +11 -13
- transformers/models/hunyuan_v1_dense/configuration_hunyuan_v1_dense.py +31 -26
- transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py +38 -35
- transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py +6 -4
- transformers/models/hunyuan_v1_moe/__init__.py +1 -1
- transformers/models/hunyuan_v1_moe/configuration_hunyuan_v1_moe.py +36 -31
- transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py +42 -47
- transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py +9 -9
- transformers/models/ibert/configuration_ibert.py +2 -4
- transformers/models/ibert/modeling_ibert.py +62 -82
- transformers/models/ibert/quant_modules.py +1 -0
- transformers/models/idefics/configuration_idefics.py +8 -5
- transformers/models/idefics/image_processing_idefics.py +15 -13
- transformers/models/idefics/modeling_idefics.py +82 -75
- transformers/models/idefics/perceiver.py +3 -1
- transformers/models/idefics/processing_idefics.py +48 -32
- transformers/models/idefics/vision.py +25 -24
- transformers/models/idefics2/configuration_idefics2.py +3 -1
- transformers/models/idefics2/image_processing_idefics2.py +32 -31
- transformers/models/idefics2/image_processing_idefics2_fast.py +8 -8
- transformers/models/idefics2/modeling_idefics2.py +101 -127
- transformers/models/idefics2/processing_idefics2.py +68 -10
- transformers/models/idefics3/configuration_idefics3.py +4 -1
- transformers/models/idefics3/image_processing_idefics3.py +43 -42
- transformers/models/idefics3/image_processing_idefics3_fast.py +15 -40
- transformers/models/idefics3/modeling_idefics3.py +90 -115
- transformers/models/idefics3/processing_idefics3.py +69 -15
- transformers/models/ijepa/configuration_ijepa.py +1 -0
- transformers/models/ijepa/modeling_ijepa.py +11 -10
- transformers/models/ijepa/modular_ijepa.py +7 -5
- transformers/models/imagegpt/configuration_imagegpt.py +2 -9
- transformers/models/imagegpt/image_processing_imagegpt.py +18 -17
- transformers/models/imagegpt/image_processing_imagegpt_fast.py +16 -11
- transformers/models/imagegpt/modeling_imagegpt.py +65 -76
- transformers/models/informer/configuration_informer.py +9 -6
- transformers/models/informer/modeling_informer.py +86 -88
- transformers/models/informer/modular_informer.py +16 -14
- transformers/models/instructblip/configuration_instructblip.py +2 -2
- transformers/models/instructblip/modeling_instructblip.py +63 -103
- transformers/models/instructblip/processing_instructblip.py +36 -10
- transformers/models/instructblipvideo/configuration_instructblipvideo.py +2 -2
- transformers/models/instructblipvideo/modeling_instructblipvideo.py +139 -157
- transformers/models/instructblipvideo/modular_instructblipvideo.py +64 -73
- transformers/models/instructblipvideo/processing_instructblipvideo.py +33 -14
- transformers/models/instructblipvideo/video_processing_instructblipvideo.py +8 -6
- transformers/models/internvl/configuration_internvl.py +1 -0
- transformers/models/internvl/modeling_internvl.py +106 -85
- transformers/models/internvl/modular_internvl.py +67 -47
- transformers/models/internvl/processing_internvl.py +45 -12
- transformers/models/internvl/video_processing_internvl.py +12 -10
- transformers/models/jamba/configuration_jamba.py +8 -5
- transformers/models/jamba/modeling_jamba.py +66 -68
- transformers/models/jamba/modular_jamba.py +55 -54
- transformers/models/janus/configuration_janus.py +1 -0
- transformers/models/janus/image_processing_janus.py +37 -35
- transformers/models/janus/image_processing_janus_fast.py +20 -18
- transformers/models/janus/modeling_janus.py +191 -115
- transformers/models/janus/modular_janus.py +84 -133
- transformers/models/janus/processing_janus.py +43 -17
- transformers/models/jetmoe/configuration_jetmoe.py +26 -24
- transformers/models/jetmoe/modeling_jetmoe.py +46 -43
- transformers/models/jetmoe/modular_jetmoe.py +33 -31
- transformers/models/kosmos2/configuration_kosmos2.py +9 -10
- transformers/models/kosmos2/modeling_kosmos2.py +173 -208
- transformers/models/kosmos2/processing_kosmos2.py +55 -40
- transformers/models/kosmos2_5/__init__.py +1 -0
- transformers/models/kosmos2_5/configuration_kosmos2_5.py +9 -8
- transformers/models/kosmos2_5/image_processing_kosmos2_5.py +12 -10
- transformers/models/kosmos2_5/image_processing_kosmos2_5_fast.py +13 -4
- transformers/models/kosmos2_5/modeling_kosmos2_5.py +118 -132
- transformers/models/kosmos2_5/processing_kosmos2_5.py +29 -8
- transformers/models/kyutai_speech_to_text/configuration_kyutai_speech_to_text.py +28 -31
- transformers/models/kyutai_speech_to_text/feature_extraction_kyutai_speech_to_text.py +14 -12
- transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +100 -110
- transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py +22 -28
- transformers/models/kyutai_speech_to_text/processing_kyutai_speech_to_text.py +8 -2
- transformers/models/layoutlm/configuration_layoutlm.py +2 -14
- transformers/models/layoutlm/modeling_layoutlm.py +72 -77
- transformers/models/layoutlmv2/configuration_layoutlmv2.py +17 -14
- transformers/models/layoutlmv2/image_processing_layoutlmv2.py +21 -18
- transformers/models/layoutlmv2/image_processing_layoutlmv2_fast.py +9 -7
- transformers/models/layoutlmv2/modeling_layoutlmv2.py +50 -64
- transformers/models/layoutlmv2/processing_layoutlmv2.py +44 -14
- transformers/models/layoutlmv2/tokenization_layoutlmv2.py +126 -73
- transformers/models/layoutlmv3/configuration_layoutlmv3.py +19 -16
- transformers/models/layoutlmv3/image_processing_layoutlmv3.py +26 -24
- transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py +11 -9
- transformers/models/layoutlmv3/modeling_layoutlmv3.py +56 -82
- transformers/models/layoutlmv3/processing_layoutlmv3.py +46 -14
- transformers/models/layoutlmv3/tokenization_layoutlmv3.py +134 -74
- transformers/models/layoutxlm/configuration_layoutxlm.py +17 -14
- transformers/models/layoutxlm/modular_layoutxlm.py +1 -0
- transformers/models/layoutxlm/processing_layoutxlm.py +44 -14
- transformers/models/layoutxlm/tokenization_layoutxlm.py +113 -77
- transformers/models/led/configuration_led.py +12 -8
- transformers/models/led/modeling_led.py +266 -124
- transformers/models/levit/configuration_levit.py +1 -0
- transformers/models/levit/image_processing_levit.py +21 -19
- transformers/models/levit/image_processing_levit_fast.py +5 -4
- transformers/models/levit/modeling_levit.py +19 -38
- transformers/models/lfm2/configuration_lfm2.py +30 -27
- transformers/models/lfm2/modeling_lfm2.py +50 -47
- transformers/models/lfm2/modular_lfm2.py +30 -29
- transformers/models/lfm2_moe/__init__.py +1 -0
- transformers/models/lfm2_moe/configuration_lfm2_moe.py +9 -6
- transformers/models/lfm2_moe/modeling_lfm2_moe.py +53 -61
- transformers/models/lfm2_moe/modular_lfm2_moe.py +37 -13
- transformers/models/lfm2_vl/configuration_lfm2_vl.py +1 -4
- transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py +12 -41
- transformers/models/lfm2_vl/modeling_lfm2_vl.py +66 -84
- transformers/models/lfm2_vl/modular_lfm2_vl.py +56 -70
- transformers/models/lfm2_vl/processing_lfm2_vl.py +76 -96
- transformers/models/lightglue/image_processing_lightglue.py +15 -16
- transformers/models/lightglue/image_processing_lightglue_fast.py +9 -9
- transformers/models/lightglue/modeling_lightglue.py +31 -31
- transformers/models/lightglue/modular_lightglue.py +28 -29
- transformers/models/lilt/configuration_lilt.py +2 -6
- transformers/models/lilt/modeling_lilt.py +70 -76
- transformers/models/llama/configuration_llama.py +31 -26
- transformers/models/llama/modeling_llama.py +39 -36
- transformers/models/llama/tokenization_llama.py +44 -14
- transformers/models/llama4/configuration_llama4.py +30 -27
- transformers/models/llama4/image_processing_llama4_fast.py +14 -12
- transformers/models/llama4/modeling_llama4.py +113 -120
- transformers/models/llama4/processing_llama4.py +57 -33
- transformers/models/llava/configuration_llava.py +1 -10
- transformers/models/llava/image_processing_llava.py +28 -25
- transformers/models/llava/image_processing_llava_fast.py +11 -9
- transformers/models/llava/modeling_llava.py +109 -85
- transformers/models/llava/processing_llava.py +51 -18
- transformers/models/llava_next/configuration_llava_next.py +2 -2
- transformers/models/llava_next/image_processing_llava_next.py +45 -43
- transformers/models/llava_next/image_processing_llava_next_fast.py +13 -11
- transformers/models/llava_next/modeling_llava_next.py +107 -110
- transformers/models/llava_next/processing_llava_next.py +47 -18
- transformers/models/llava_next_video/configuration_llava_next_video.py +7 -4
- transformers/models/llava_next_video/modeling_llava_next_video.py +158 -175
- transformers/models/llava_next_video/modular_llava_next_video.py +150 -155
- transformers/models/llava_next_video/processing_llava_next_video.py +63 -21
- transformers/models/llava_next_video/video_processing_llava_next_video.py +1 -0
- transformers/models/llava_onevision/configuration_llava_onevision.py +7 -4
- transformers/models/llava_onevision/image_processing_llava_onevision.py +42 -40
- transformers/models/llava_onevision/image_processing_llava_onevision_fast.py +15 -14
- transformers/models/llava_onevision/modeling_llava_onevision.py +169 -177
- transformers/models/llava_onevision/modular_llava_onevision.py +156 -163
- transformers/models/llava_onevision/processing_llava_onevision.py +53 -21
- transformers/models/llava_onevision/video_processing_llava_onevision.py +1 -0
- transformers/models/longcat_flash/__init__.py +1 -0
- transformers/models/longcat_flash/configuration_longcat_flash.py +42 -37
- transformers/models/longcat_flash/modeling_longcat_flash.py +36 -36
- transformers/models/longcat_flash/modular_longcat_flash.py +21 -21
- transformers/models/longformer/configuration_longformer.py +5 -5
- transformers/models/longformer/modeling_longformer.py +101 -105
- transformers/models/longt5/configuration_longt5.py +7 -9
- transformers/models/longt5/modeling_longt5.py +49 -49
- transformers/models/luke/configuration_luke.py +2 -8
- transformers/models/luke/modeling_luke.py +181 -188
- transformers/models/luke/tokenization_luke.py +140 -107
- transformers/models/lxmert/configuration_lxmert.py +1 -16
- transformers/models/lxmert/modeling_lxmert.py +74 -65
- transformers/models/m2m_100/configuration_m2m_100.py +9 -7
- transformers/models/m2m_100/modeling_m2m_100.py +71 -83
- transformers/models/m2m_100/tokenization_m2m_100.py +8 -8
- transformers/models/mamba/configuration_mamba.py +2 -1
- transformers/models/mamba/modeling_mamba.py +66 -58
- transformers/models/mamba2/configuration_mamba2.py +8 -5
- transformers/models/mamba2/modeling_mamba2.py +69 -68
- transformers/models/marian/configuration_marian.py +5 -10
- transformers/models/marian/modeling_marian.py +87 -93
- transformers/models/marian/tokenization_marian.py +6 -6
- transformers/models/markuplm/configuration_markuplm.py +7 -4
- transformers/models/markuplm/feature_extraction_markuplm.py +2 -1
- transformers/models/markuplm/modeling_markuplm.py +70 -69
- transformers/models/markuplm/processing_markuplm.py +38 -31
- transformers/models/markuplm/tokenization_markuplm.py +136 -93
- transformers/models/mask2former/configuration_mask2former.py +8 -5
- transformers/models/mask2former/image_processing_mask2former.py +85 -84
- transformers/models/mask2former/image_processing_mask2former_fast.py +40 -37
- transformers/models/mask2former/modeling_mask2former.py +103 -118
- transformers/models/mask2former/modular_mask2former.py +8 -6
- transformers/models/maskformer/configuration_maskformer.py +9 -6
- transformers/models/maskformer/configuration_maskformer_swin.py +1 -0
- transformers/models/maskformer/image_processing_maskformer.py +85 -84
- transformers/models/maskformer/image_processing_maskformer_fast.py +40 -36
- transformers/models/maskformer/modeling_maskformer.py +65 -79
- transformers/models/maskformer/modeling_maskformer_swin.py +32 -36
- transformers/models/mbart/configuration_mbart.py +4 -9
- transformers/models/mbart/modeling_mbart.py +116 -131
- transformers/models/mbart/tokenization_mbart.py +54 -11
- transformers/models/mbart50/tokenization_mbart50.py +13 -8
- transformers/models/megatron_bert/configuration_megatron_bert.py +3 -13
- transformers/models/megatron_bert/modeling_megatron_bert.py +150 -148
- transformers/models/metaclip_2/configuration_metaclip_2.py +1 -4
- transformers/models/metaclip_2/modeling_metaclip_2.py +84 -91
- transformers/models/metaclip_2/modular_metaclip_2.py +45 -61
- transformers/models/mgp_str/configuration_mgp_str.py +1 -0
- transformers/models/mgp_str/modeling_mgp_str.py +18 -20
- transformers/models/mgp_str/processing_mgp_str.py +20 -3
- transformers/models/mgp_str/tokenization_mgp_str.py +3 -1
- transformers/models/mimi/configuration_mimi.py +40 -42
- transformers/models/mimi/modeling_mimi.py +113 -142
- transformers/models/minimax/__init__.py +1 -0
- transformers/models/minimax/configuration_minimax.py +43 -37
- transformers/models/minimax/modeling_minimax.py +51 -61
- transformers/models/minimax/modular_minimax.py +62 -68
- transformers/models/ministral/configuration_ministral.py +29 -25
- transformers/models/ministral/modeling_ministral.py +38 -36
- transformers/models/ministral/modular_ministral.py +37 -32
- transformers/models/ministral3/configuration_ministral3.py +27 -24
- transformers/models/ministral3/modeling_ministral3.py +37 -36
- transformers/models/ministral3/modular_ministral3.py +5 -4
- transformers/models/mistral/configuration_mistral.py +29 -24
- transformers/models/mistral/modeling_mistral.py +37 -36
- transformers/models/mistral/modular_mistral.py +12 -11
- transformers/models/mistral3/configuration_mistral3.py +1 -4
- transformers/models/mistral3/modeling_mistral3.py +86 -89
- transformers/models/mistral3/modular_mistral3.py +68 -69
- transformers/models/mixtral/configuration_mixtral.py +34 -29
- transformers/models/mixtral/modeling_mixtral.py +45 -50
- transformers/models/mixtral/modular_mixtral.py +31 -32
- transformers/models/mlcd/configuration_mlcd.py +1 -0
- transformers/models/mlcd/modeling_mlcd.py +14 -20
- transformers/models/mlcd/modular_mlcd.py +13 -17
- transformers/models/mllama/configuration_mllama.py +15 -10
- transformers/models/mllama/image_processing_mllama.py +25 -23
- transformers/models/mllama/image_processing_mllama_fast.py +11 -11
- transformers/models/mllama/modeling_mllama.py +94 -105
- transformers/models/mllama/processing_mllama.py +55 -6
- transformers/models/mluke/tokenization_mluke.py +107 -101
- transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py +3 -5
- transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +140 -155
- transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py +3 -5
- transformers/models/mobilebert/configuration_mobilebert.py +2 -4
- transformers/models/mobilebert/modeling_mobilebert.py +85 -77
- transformers/models/mobilebert/tokenization_mobilebert.py +1 -0
- transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +1 -0
- transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py +23 -20
- transformers/models/mobilenet_v1/image_processing_mobilenet_v1_fast.py +1 -0
- transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +16 -15
- transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +1 -0
- transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +51 -48
- transformers/models/mobilenet_v2/image_processing_mobilenet_v2_fast.py +15 -13
- transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +22 -24
- transformers/models/mobilevit/configuration_mobilevit.py +1 -0
- transformers/models/mobilevit/image_processing_mobilevit.py +49 -46
- transformers/models/mobilevit/image_processing_mobilevit_fast.py +14 -12
- transformers/models/mobilevit/modeling_mobilevit.py +21 -28
- transformers/models/mobilevitv2/configuration_mobilevitv2.py +1 -0
- transformers/models/mobilevitv2/modeling_mobilevitv2.py +22 -28
- transformers/models/modernbert/configuration_modernbert.py +42 -44
- transformers/models/modernbert/modeling_modernbert.py +133 -145
- transformers/models/modernbert/modular_modernbert.py +170 -186
- transformers/models/modernbert_decoder/configuration_modernbert_decoder.py +40 -40
- transformers/models/modernbert_decoder/modeling_modernbert_decoder.py +57 -62
- transformers/models/modernbert_decoder/modular_modernbert_decoder.py +86 -94
- transformers/models/moonshine/configuration_moonshine.py +31 -34
- transformers/models/moonshine/modeling_moonshine.py +71 -71
- transformers/models/moonshine/modular_moonshine.py +83 -88
- transformers/models/moshi/configuration_moshi.py +23 -46
- transformers/models/moshi/modeling_moshi.py +187 -157
- transformers/models/mpnet/configuration_mpnet.py +2 -6
- transformers/models/mpnet/modeling_mpnet.py +57 -62
- transformers/models/mpnet/tokenization_mpnet.py +15 -4
- transformers/models/mpt/configuration_mpt.py +9 -5
- transformers/models/mpt/modeling_mpt.py +60 -60
- transformers/models/mra/configuration_mra.py +2 -8
- transformers/models/mra/modeling_mra.py +57 -64
- transformers/models/mt5/configuration_mt5.py +8 -10
- transformers/models/mt5/modeling_mt5.py +95 -87
- transformers/models/musicgen/configuration_musicgen.py +8 -12
- transformers/models/musicgen/modeling_musicgen.py +122 -118
- transformers/models/musicgen/processing_musicgen.py +21 -3
- transformers/models/musicgen_melody/configuration_musicgen_melody.py +8 -15
- transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py +9 -8
- transformers/models/musicgen_melody/modeling_musicgen_melody.py +123 -117
- transformers/models/musicgen_melody/processing_musicgen_melody.py +22 -3
- transformers/models/mvp/configuration_mvp.py +5 -8
- transformers/models/mvp/modeling_mvp.py +123 -135
- transformers/models/myt5/tokenization_myt5.py +10 -8
- transformers/models/nanochat/configuration_nanochat.py +8 -5
- transformers/models/nanochat/modeling_nanochat.py +40 -37
- transformers/models/nanochat/modular_nanochat.py +14 -12
- transformers/models/nemotron/configuration_nemotron.py +30 -25
- transformers/models/nemotron/modeling_nemotron.py +57 -56
- transformers/models/nllb/tokenization_nllb.py +28 -12
- transformers/models/nllb_moe/configuration_nllb_moe.py +9 -7
- transformers/models/nllb_moe/modeling_nllb_moe.py +69 -77
- transformers/models/nougat/image_processing_nougat.py +32 -29
- transformers/models/nougat/image_processing_nougat_fast.py +14 -12
- transformers/models/nougat/processing_nougat.py +39 -37
- transformers/models/nougat/tokenization_nougat.py +73 -18
- transformers/models/nystromformer/configuration_nystromformer.py +2 -8
- transformers/models/nystromformer/modeling_nystromformer.py +63 -74
- transformers/models/olmo/configuration_olmo.py +28 -23
- transformers/models/olmo/modeling_olmo.py +39 -36
- transformers/models/olmo/modular_olmo.py +11 -7
- transformers/models/olmo2/configuration_olmo2.py +28 -23
- transformers/models/olmo2/modeling_olmo2.py +41 -37
- transformers/models/olmo2/modular_olmo2.py +32 -29
- transformers/models/olmo3/__init__.py +1 -0
- transformers/models/olmo3/configuration_olmo3.py +30 -26
- transformers/models/olmo3/modeling_olmo3.py +39 -36
- transformers/models/olmo3/modular_olmo3.py +40 -37
- transformers/models/olmoe/configuration_olmoe.py +33 -29
- transformers/models/olmoe/modeling_olmoe.py +46 -52
- transformers/models/olmoe/modular_olmoe.py +15 -16
- transformers/models/omdet_turbo/configuration_omdet_turbo.py +4 -2
- transformers/models/omdet_turbo/modeling_omdet_turbo.py +47 -53
- transformers/models/omdet_turbo/processing_omdet_turbo.py +67 -19
- transformers/models/oneformer/configuration_oneformer.py +8 -5
- transformers/models/oneformer/image_processing_oneformer.py +84 -83
- transformers/models/oneformer/image_processing_oneformer_fast.py +42 -41
- transformers/models/oneformer/modeling_oneformer.py +171 -147
- transformers/models/oneformer/processing_oneformer.py +43 -28
- transformers/models/openai/configuration_openai.py +1 -16
- transformers/models/openai/modeling_openai.py +51 -65
- transformers/models/openai/tokenization_openai.py +47 -8
- transformers/models/opt/configuration_opt.py +7 -6
- transformers/models/opt/modeling_opt.py +76 -78
- transformers/models/ovis2/__init__.py +1 -0
- transformers/models/ovis2/configuration_ovis2.py +1 -0
- transformers/models/ovis2/image_processing_ovis2.py +24 -22
- transformers/models/ovis2/image_processing_ovis2_fast.py +11 -9
- transformers/models/ovis2/modeling_ovis2.py +142 -111
- transformers/models/ovis2/modular_ovis2.py +45 -90
- transformers/models/ovis2/processing_ovis2.py +40 -12
- transformers/models/owlv2/configuration_owlv2.py +2 -4
- transformers/models/owlv2/image_processing_owlv2.py +21 -20
- transformers/models/owlv2/image_processing_owlv2_fast.py +15 -12
- transformers/models/owlv2/modeling_owlv2.py +117 -133
- transformers/models/owlv2/modular_owlv2.py +14 -11
- transformers/models/owlv2/processing_owlv2.py +49 -20
- transformers/models/owlvit/configuration_owlvit.py +2 -4
- transformers/models/owlvit/image_processing_owlvit.py +22 -21
- transformers/models/owlvit/image_processing_owlvit_fast.py +3 -2
- transformers/models/owlvit/modeling_owlvit.py +116 -132
- transformers/models/owlvit/processing_owlvit.py +48 -20
- transformers/models/paligemma/configuration_paligemma.py +1 -4
- transformers/models/paligemma/modeling_paligemma.py +93 -103
- transformers/models/paligemma/processing_paligemma.py +66 -13
- transformers/models/parakeet/configuration_parakeet.py +14 -7
- transformers/models/parakeet/feature_extraction_parakeet.py +12 -10
- transformers/models/parakeet/modeling_parakeet.py +28 -32
- transformers/models/parakeet/modular_parakeet.py +20 -23
- transformers/models/parakeet/processing_parakeet.py +5 -13
- transformers/models/parakeet/{tokenization_parakeet.py → tokenization_parakeet_fast.py} +7 -5
- transformers/models/patchtsmixer/configuration_patchtsmixer.py +8 -5
- transformers/models/patchtsmixer/modeling_patchtsmixer.py +62 -70
- transformers/models/patchtst/configuration_patchtst.py +9 -6
- transformers/models/patchtst/modeling_patchtst.py +80 -97
- transformers/models/pegasus/configuration_pegasus.py +5 -8
- transformers/models/pegasus/modeling_pegasus.py +66 -72
- transformers/models/pegasus/tokenization_pegasus.py +45 -15
- transformers/models/pegasus_x/configuration_pegasus_x.py +4 -5
- transformers/models/pegasus_x/modeling_pegasus_x.py +52 -55
- transformers/models/perceiver/configuration_perceiver.py +1 -0
- transformers/models/perceiver/image_processing_perceiver.py +25 -22
- transformers/models/perceiver/image_processing_perceiver_fast.py +9 -7
- transformers/models/perceiver/modeling_perceiver.py +146 -165
- transformers/models/perceiver/tokenization_perceiver.py +6 -3
- transformers/models/perception_lm/configuration_perception_lm.py +1 -0
- transformers/models/perception_lm/image_processing_perception_lm_fast.py +10 -8
- transformers/models/perception_lm/modeling_perception_lm.py +70 -71
- transformers/models/perception_lm/modular_perception_lm.py +61 -65
- transformers/models/perception_lm/processing_perception_lm.py +47 -13
- transformers/models/perception_lm/video_processing_perception_lm.py +1 -0
- transformers/models/persimmon/configuration_persimmon.py +28 -23
- transformers/models/persimmon/modeling_persimmon.py +45 -43
- transformers/models/phi/configuration_phi.py +28 -23
- transformers/models/phi/modeling_phi.py +43 -40
- transformers/models/phi/modular_phi.py +24 -23
- transformers/models/phi3/configuration_phi3.py +33 -28
- transformers/models/phi3/modeling_phi3.py +38 -36
- transformers/models/phi3/modular_phi3.py +17 -13
- transformers/models/phi4_multimodal/configuration_phi4_multimodal.py +33 -30
- transformers/models/phi4_multimodal/feature_extraction_phi4_multimodal.py +9 -7
- transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py +11 -11
- transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +78 -95
- transformers/models/phi4_multimodal/modular_phi4_multimodal.py +80 -98
- transformers/models/phi4_multimodal/processing_phi4_multimodal.py +44 -7
- transformers/models/phimoe/configuration_phimoe.py +36 -31
- transformers/models/phimoe/modeling_phimoe.py +45 -50
- transformers/models/phimoe/modular_phimoe.py +4 -3
- transformers/models/phobert/tokenization_phobert.py +6 -4
- transformers/models/pix2struct/configuration_pix2struct.py +10 -12
- transformers/models/pix2struct/image_processing_pix2struct.py +19 -15
- transformers/models/pix2struct/image_processing_pix2struct_fast.py +15 -12
- transformers/models/pix2struct/modeling_pix2struct.py +52 -58
- transformers/models/pix2struct/processing_pix2struct.py +30 -5
- transformers/models/pixtral/configuration_pixtral.py +14 -11
- transformers/models/pixtral/image_processing_pixtral.py +28 -26
- transformers/models/pixtral/image_processing_pixtral_fast.py +11 -10
- transformers/models/pixtral/modeling_pixtral.py +34 -28
- transformers/models/pixtral/processing_pixtral.py +53 -21
- transformers/models/plbart/configuration_plbart.py +5 -8
- transformers/models/plbart/modeling_plbart.py +106 -119
- transformers/models/plbart/modular_plbart.py +33 -39
- transformers/models/plbart/tokenization_plbart.py +7 -4
- transformers/models/poolformer/configuration_poolformer.py +1 -0
- transformers/models/poolformer/image_processing_poolformer.py +24 -21
- transformers/models/poolformer/image_processing_poolformer_fast.py +15 -13
- transformers/models/poolformer/modeling_poolformer.py +13 -23
- transformers/models/pop2piano/configuration_pop2piano.py +8 -7
- transformers/models/pop2piano/feature_extraction_pop2piano.py +9 -6
- transformers/models/pop2piano/modeling_pop2piano.py +24 -26
- transformers/models/pop2piano/processing_pop2piano.py +33 -25
- transformers/models/pop2piano/tokenization_pop2piano.py +23 -15
- transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py +3 -3
- transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py +28 -28
- transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything_fast.py +21 -20
- transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py +13 -16
- transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py +13 -16
- transformers/models/prophetnet/configuration_prophetnet.py +38 -37
- transformers/models/prophetnet/modeling_prophetnet.py +131 -114
- transformers/models/prophetnet/tokenization_prophetnet.py +16 -14
- transformers/models/pvt/configuration_pvt.py +1 -0
- transformers/models/pvt/image_processing_pvt.py +27 -24
- transformers/models/pvt/image_processing_pvt_fast.py +2 -1
- transformers/models/pvt/modeling_pvt.py +21 -21
- transformers/models/pvt_v2/configuration_pvt_v2.py +4 -2
- transformers/models/pvt_v2/modeling_pvt_v2.py +25 -28
- transformers/models/qwen2/configuration_qwen2.py +25 -32
- transformers/models/qwen2/modeling_qwen2.py +38 -36
- transformers/models/qwen2/modular_qwen2.py +12 -11
- transformers/models/qwen2/tokenization_qwen2.py +23 -12
- transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py +26 -32
- transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +277 -340
- transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +211 -278
- transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py +49 -41
- transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +35 -29
- transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +148 -203
- transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py +118 -93
- transformers/models/qwen2_5_vl/processing_qwen2_5_vl.py +43 -7
- transformers/models/qwen2_audio/configuration_qwen2_audio.py +1 -0
- transformers/models/qwen2_audio/modeling_qwen2_audio.py +40 -40
- transformers/models/qwen2_audio/processing_qwen2_audio.py +42 -13
- transformers/models/qwen2_moe/configuration_qwen2_moe.py +35 -42
- transformers/models/qwen2_moe/modeling_qwen2_moe.py +46 -51
- transformers/models/qwen2_moe/modular_qwen2_moe.py +10 -7
- transformers/models/qwen2_vl/configuration_qwen2_vl.py +34 -29
- transformers/models/qwen2_vl/image_processing_qwen2_vl.py +42 -41
- transformers/models/qwen2_vl/image_processing_qwen2_vl_fast.py +15 -12
- transformers/models/qwen2_vl/modeling_qwen2_vl.py +153 -199
- transformers/models/qwen2_vl/processing_qwen2_vl.py +44 -7
- transformers/models/qwen2_vl/video_processing_qwen2_vl.py +18 -38
- transformers/models/qwen3/configuration_qwen3.py +27 -34
- transformers/models/qwen3/modeling_qwen3.py +39 -36
- transformers/models/qwen3/modular_qwen3.py +6 -4
- transformers/models/qwen3_moe/configuration_qwen3_moe.py +32 -39
- transformers/models/qwen3_moe/modeling_qwen3_moe.py +46 -51
- transformers/models/qwen3_moe/modular_qwen3_moe.py +13 -10
- transformers/models/qwen3_next/configuration_qwen3_next.py +35 -45
- transformers/models/qwen3_next/modeling_qwen3_next.py +51 -47
- transformers/models/qwen3_next/modular_qwen3_next.py +35 -34
- transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py +101 -135
- transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +252 -355
- transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +196 -250
- transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py +48 -40
- transformers/models/qwen3_vl/configuration_qwen3_vl.py +29 -27
- transformers/models/qwen3_vl/modeling_qwen3_vl.py +155 -233
- transformers/models/qwen3_vl/modular_qwen3_vl.py +179 -206
- transformers/models/qwen3_vl/processing_qwen3_vl.py +42 -6
- transformers/models/qwen3_vl/video_processing_qwen3_vl.py +12 -10
- transformers/models/qwen3_vl_moe/configuration_qwen3_vl_moe.py +30 -23
- transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py +303 -358
- transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py +124 -87
- transformers/models/rag/configuration_rag.py +15 -6
- transformers/models/rag/modeling_rag.py +130 -127
- transformers/models/rag/retrieval_rag.py +5 -3
- transformers/models/rag/tokenization_rag.py +50 -0
- transformers/models/recurrent_gemma/configuration_recurrent_gemma.py +30 -29
- transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +42 -53
- transformers/models/reformer/configuration_reformer.py +8 -7
- transformers/models/reformer/modeling_reformer.py +69 -80
- transformers/models/reformer/tokenization_reformer.py +31 -11
- transformers/models/regnet/configuration_regnet.py +1 -0
- transformers/models/regnet/modeling_regnet.py +8 -15
- transformers/models/rembert/configuration_rembert.py +2 -8
- transformers/models/rembert/modeling_rembert.py +111 -121
- transformers/models/rembert/tokenization_rembert.py +12 -2
- transformers/models/resnet/configuration_resnet.py +1 -0
- transformers/models/resnet/modeling_resnet.py +13 -27
- transformers/models/roberta/configuration_roberta.py +3 -11
- transformers/models/roberta/modeling_roberta.py +93 -94
- transformers/models/roberta/modular_roberta.py +58 -58
- transformers/models/roberta/tokenization_roberta.py +29 -17
- transformers/models/roberta/tokenization_roberta_old.py +4 -2
- transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +3 -11
- transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +93 -94
- transformers/models/roc_bert/configuration_roc_bert.py +2 -8
- transformers/models/roc_bert/modeling_roc_bert.py +121 -122
- transformers/models/roc_bert/tokenization_roc_bert.py +94 -88
- transformers/models/roformer/configuration_roformer.py +3 -13
- transformers/models/roformer/modeling_roformer.py +81 -85
- transformers/models/roformer/tokenization_roformer.py +412 -74
- transformers/models/roformer/tokenization_roformer_fast.py +160 -0
- transformers/models/roformer/tokenization_utils.py +1 -0
- transformers/models/rt_detr/configuration_rt_detr.py +2 -1
- transformers/models/rt_detr/configuration_rt_detr_resnet.py +1 -0
- transformers/models/rt_detr/image_processing_rt_detr.py +55 -54
- transformers/models/rt_detr/image_processing_rt_detr_fast.py +26 -26
- transformers/models/rt_detr/modeling_rt_detr.py +90 -99
- transformers/models/rt_detr/modeling_rt_detr_resnet.py +6 -13
- transformers/models/rt_detr/modular_rt_detr.py +16 -16
- transformers/models/rt_detr_v2/configuration_rt_detr_v2.py +4 -6
- transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +90 -101
- transformers/models/rt_detr_v2/modular_rt_detr_v2.py +12 -19
- transformers/models/rwkv/configuration_rwkv.py +4 -2
- transformers/models/rwkv/modeling_rwkv.py +32 -31
- transformers/models/sam/configuration_sam.py +1 -3
- transformers/models/sam/image_processing_sam.py +60 -59
- transformers/models/sam/image_processing_sam_fast.py +27 -25
- transformers/models/sam/modeling_sam.py +41 -47
- transformers/models/sam/processing_sam.py +27 -39
- transformers/models/sam2/configuration_sam2.py +3 -2
- transformers/models/sam2/image_processing_sam2_fast.py +15 -14
- transformers/models/sam2/modeling_sam2.py +90 -96
- transformers/models/sam2/modular_sam2.py +91 -86
- transformers/models/sam2/processing_sam2.py +47 -31
- transformers/models/sam2_video/configuration_sam2_video.py +1 -0
- transformers/models/sam2_video/modeling_sam2_video.py +144 -151
- transformers/models/sam2_video/modular_sam2_video.py +104 -101
- transformers/models/sam2_video/processing_sam2_video.py +66 -49
- transformers/models/sam2_video/video_processing_sam2_video.py +4 -1
- transformers/models/sam3/configuration_sam3.py +2 -21
- transformers/models/sam3/image_processing_sam3_fast.py +20 -17
- transformers/models/sam3/modeling_sam3.py +170 -184
- transformers/models/sam3/modular_sam3.py +8 -3
- transformers/models/sam3/processing_sam3.py +52 -37
- transformers/models/sam3_tracker/__init__.py +1 -0
- transformers/models/sam3_tracker/configuration_sam3_tracker.py +3 -1
- transformers/models/sam3_tracker/modeling_sam3_tracker.py +77 -82
- transformers/models/sam3_tracker/modular_sam3_tracker.py +3 -8
- transformers/models/sam3_tracker/processing_sam3_tracker.py +48 -31
- transformers/models/sam3_tracker_video/__init__.py +1 -0
- transformers/models/sam3_tracker_video/configuration_sam3_tracker_video.py +1 -25
- transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +122 -135
- transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py +26 -35
- transformers/models/sam3_tracker_video/processing_sam3_tracker_video.py +66 -50
- transformers/models/sam3_video/configuration_sam3_video.py +1 -14
- transformers/models/sam3_video/modeling_sam3_video.py +34 -33
- transformers/models/sam3_video/processing_sam3_video.py +46 -26
- transformers/models/sam_hq/__init__.py +1 -1
- transformers/models/sam_hq/configuration_sam_hq.py +1 -3
- transformers/models/sam_hq/modeling_sam_hq.py +69 -74
- transformers/models/sam_hq/modular_sam_hq.py +25 -23
- transformers/models/sam_hq/{processing_sam_hq.py → processing_samhq.py} +29 -41
- transformers/models/seamless_m4t/configuration_seamless_m4t.py +10 -8
- transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py +11 -8
- transformers/models/seamless_m4t/modeling_seamless_m4t.py +194 -212
- transformers/models/seamless_m4t/processing_seamless_m4t.py +39 -18
- transformers/models/seamless_m4t/tokenization_seamless_m4t.py +77 -40
- transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py +10 -8
- transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +196 -204
- transformers/models/seed_oss/configuration_seed_oss.py +32 -28
- transformers/models/seed_oss/modeling_seed_oss.py +35 -33
- transformers/models/seed_oss/modular_seed_oss.py +4 -3
- transformers/models/segformer/configuration_segformer.py +10 -0
- transformers/models/segformer/image_processing_segformer.py +42 -39
- transformers/models/segformer/image_processing_segformer_fast.py +12 -10
- transformers/models/segformer/modeling_segformer.py +31 -34
- transformers/models/segformer/modular_segformer.py +10 -8
- transformers/models/seggpt/configuration_seggpt.py +1 -0
- transformers/models/seggpt/image_processing_seggpt.py +41 -38
- transformers/models/seggpt/modeling_seggpt.py +38 -50
- transformers/models/sew/configuration_sew.py +2 -4
- transformers/models/sew/modeling_sew.py +36 -38
- transformers/models/sew/modular_sew.py +13 -13
- transformers/models/sew_d/configuration_sew_d.py +2 -4
- transformers/models/sew_d/modeling_sew_d.py +30 -31
- transformers/models/shieldgemma2/configuration_shieldgemma2.py +1 -0
- transformers/models/shieldgemma2/modeling_shieldgemma2.py +17 -16
- transformers/models/shieldgemma2/processing_shieldgemma2.py +5 -3
- transformers/models/siglip/configuration_siglip.py +2 -4
- transformers/models/siglip/image_processing_siglip.py +20 -17
- transformers/models/siglip/image_processing_siglip_fast.py +1 -0
- transformers/models/siglip/modeling_siglip.py +75 -84
- transformers/models/siglip/processing_siglip.py +14 -2
- transformers/models/siglip/tokenization_siglip.py +7 -6
- transformers/models/siglip2/configuration_siglip2.py +2 -5
- transformers/models/siglip2/image_processing_siglip2.py +16 -15
- transformers/models/siglip2/image_processing_siglip2_fast.py +7 -6
- transformers/models/siglip2/modeling_siglip2.py +129 -143
- transformers/models/siglip2/modular_siglip2.py +46 -47
- transformers/models/siglip2/processing_siglip2.py +14 -2
- transformers/models/smollm3/configuration_smollm3.py +32 -29
- transformers/models/smollm3/modeling_smollm3.py +39 -36
- transformers/models/smollm3/modular_smollm3.py +35 -33
- transformers/models/smolvlm/configuration_smolvlm.py +4 -2
- transformers/models/smolvlm/image_processing_smolvlm.py +43 -42
- transformers/models/smolvlm/image_processing_smolvlm_fast.py +15 -41
- transformers/models/smolvlm/modeling_smolvlm.py +94 -126
- transformers/models/smolvlm/modular_smolvlm.py +39 -50
- transformers/models/smolvlm/processing_smolvlm.py +83 -15
- transformers/models/smolvlm/video_processing_smolvlm.py +18 -16
- transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +1 -0
- transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +27 -26
- transformers/models/speech_to_text/configuration_speech_to_text.py +9 -9
- transformers/models/speech_to_text/feature_extraction_speech_to_text.py +13 -10
- transformers/models/speech_to_text/modeling_speech_to_text.py +54 -66
- transformers/models/speech_to_text/processing_speech_to_text.py +30 -4
- transformers/models/speech_to_text/tokenization_speech_to_text.py +6 -5
- transformers/models/speecht5/configuration_speecht5.py +9 -7
- transformers/models/speecht5/feature_extraction_speecht5.py +37 -16
- transformers/models/speecht5/modeling_speecht5.py +175 -213
- transformers/models/speecht5/number_normalizer.py +1 -0
- transformers/models/speecht5/processing_speecht5.py +37 -3
- transformers/models/speecht5/tokenization_speecht5.py +5 -4
- transformers/models/splinter/configuration_splinter.py +7 -6
- transformers/models/splinter/modeling_splinter.py +59 -71
- transformers/models/splinter/tokenization_splinter.py +30 -9
- transformers/models/squeezebert/configuration_squeezebert.py +2 -14
- transformers/models/squeezebert/modeling_squeezebert.py +62 -68
- transformers/models/squeezebert/tokenization_squeezebert.py +1 -0
- transformers/models/stablelm/configuration_stablelm.py +29 -24
- transformers/models/stablelm/modeling_stablelm.py +45 -44
- transformers/models/starcoder2/configuration_starcoder2.py +27 -30
- transformers/models/starcoder2/modeling_starcoder2.py +41 -39
- transformers/models/starcoder2/modular_starcoder2.py +16 -14
- transformers/models/superglue/configuration_superglue.py +3 -7
- transformers/models/superglue/image_processing_superglue.py +15 -15
- transformers/models/superglue/image_processing_superglue_fast.py +10 -9
- transformers/models/superglue/modeling_superglue.py +37 -42
- transformers/models/superpoint/image_processing_superpoint.py +15 -15
- transformers/models/superpoint/image_processing_superpoint_fast.py +11 -8
- transformers/models/superpoint/modeling_superpoint.py +16 -18
- transformers/models/swiftformer/configuration_swiftformer.py +1 -0
- transformers/models/swiftformer/modeling_swiftformer.py +14 -18
- transformers/models/swin/configuration_swin.py +1 -0
- transformers/models/swin/modeling_swin.py +86 -86
- transformers/models/swin2sr/configuration_swin2sr.py +1 -0
- transformers/models/swin2sr/image_processing_swin2sr.py +13 -10
- transformers/models/swin2sr/image_processing_swin2sr_fast.py +8 -4
- transformers/models/swin2sr/modeling_swin2sr.py +63 -81
- transformers/models/swinv2/configuration_swinv2.py +1 -0
- transformers/models/swinv2/modeling_swinv2.py +104 -108
- transformers/models/switch_transformers/configuration_switch_transformers.py +7 -11
- transformers/models/switch_transformers/modeling_switch_transformers.py +44 -37
- transformers/models/switch_transformers/modular_switch_transformers.py +41 -34
- transformers/models/t5/configuration_t5.py +8 -14
- transformers/models/t5/modeling_t5.py +92 -88
- transformers/models/t5/tokenization_t5.py +9 -3
- transformers/models/t5gemma/configuration_t5gemma.py +41 -43
- transformers/models/t5gemma/modeling_t5gemma.py +107 -104
- transformers/models/t5gemma/modular_t5gemma.py +120 -124
- transformers/models/t5gemma2/configuration_t5gemma2.py +120 -80
- transformers/models/t5gemma2/modeling_t5gemma2.py +125 -141
- transformers/models/t5gemma2/modular_t5gemma2.py +104 -393
- transformers/models/table_transformer/configuration_table_transformer.py +2 -1
- transformers/models/table_transformer/modeling_table_transformer.py +49 -51
- transformers/models/tapas/configuration_tapas.py +2 -12
- transformers/models/tapas/modeling_tapas.py +67 -68
- transformers/models/tapas/tokenization_tapas.py +153 -115
- transformers/models/textnet/configuration_textnet.py +1 -0
- transformers/models/textnet/image_processing_textnet.py +25 -22
- transformers/models/textnet/image_processing_textnet_fast.py +10 -8
- transformers/models/textnet/modeling_textnet.py +16 -28
- transformers/models/time_series_transformer/configuration_time_series_transformer.py +8 -5
- transformers/models/time_series_transformer/modeling_time_series_transformer.py +81 -83
- transformers/models/timesfm/configuration_timesfm.py +1 -0
- transformers/models/timesfm/modeling_timesfm.py +22 -33
- transformers/models/timesfm/modular_timesfm.py +21 -32
- transformers/models/timesformer/configuration_timesformer.py +1 -0
- transformers/models/timesformer/modeling_timesformer.py +16 -15
- transformers/models/timm_backbone/configuration_timm_backbone.py +1 -0
- transformers/models/timm_backbone/modeling_timm_backbone.py +15 -17
- transformers/models/timm_wrapper/configuration_timm_wrapper.py +3 -5
- transformers/models/timm_wrapper/image_processing_timm_wrapper.py +5 -4
- transformers/models/timm_wrapper/modeling_timm_wrapper.py +29 -34
- transformers/models/trocr/configuration_trocr.py +8 -11
- transformers/models/trocr/modeling_trocr.py +44 -45
- transformers/models/trocr/processing_trocr.py +25 -5
- transformers/models/tvp/configuration_tvp.py +2 -5
- transformers/models/tvp/image_processing_tvp.py +52 -50
- transformers/models/tvp/image_processing_tvp_fast.py +15 -15
- transformers/models/tvp/modeling_tvp.py +27 -27
- transformers/models/tvp/processing_tvp.py +14 -2
- transformers/models/udop/configuration_udop.py +7 -16
- transformers/models/udop/modeling_udop.py +73 -71
- transformers/models/udop/processing_udop.py +26 -7
- transformers/models/udop/tokenization_udop.py +105 -84
- transformers/models/umt5/configuration_umt5.py +7 -8
- transformers/models/umt5/modeling_umt5.py +90 -94
- transformers/models/unispeech/configuration_unispeech.py +2 -4
- transformers/models/unispeech/modeling_unispeech.py +49 -51
- transformers/models/unispeech/modular_unispeech.py +22 -22
- transformers/models/unispeech_sat/configuration_unispeech_sat.py +2 -4
- transformers/models/unispeech_sat/modeling_unispeech_sat.py +65 -69
- transformers/models/unispeech_sat/modular_unispeech_sat.py +23 -23
- transformers/models/univnet/feature_extraction_univnet.py +14 -14
- transformers/models/univnet/modeling_univnet.py +8 -8
- transformers/models/upernet/configuration_upernet.py +1 -0
- transformers/models/upernet/modeling_upernet.py +13 -11
- transformers/models/vaultgemma/__init__.py +1 -0
- transformers/models/vaultgemma/configuration_vaultgemma.py +33 -29
- transformers/models/vaultgemma/modeling_vaultgemma.py +41 -39
- transformers/models/vaultgemma/modular_vaultgemma.py +31 -29
- transformers/models/video_llama_3/configuration_video_llama_3.py +0 -4
- transformers/models/video_llama_3/image_processing_video_llama_3.py +42 -43
- transformers/models/video_llama_3/image_processing_video_llama_3_fast.py +14 -12
- transformers/models/video_llama_3/modeling_video_llama_3.py +109 -157
- transformers/models/video_llama_3/modular_video_llama_3.py +146 -155
- transformers/models/video_llama_3/processing_video_llama_3.py +39 -5
- transformers/models/video_llama_3/video_processing_video_llama_3.py +23 -42
- transformers/models/video_llava/configuration_video_llava.py +1 -4
- transformers/models/video_llava/image_processing_video_llava.py +38 -35
- transformers/models/video_llava/modeling_video_llava.py +146 -146
- transformers/models/video_llava/processing_video_llava.py +78 -38
- transformers/models/video_llava/video_processing_video_llava.py +1 -0
- transformers/models/videomae/configuration_videomae.py +1 -0
- transformers/models/videomae/image_processing_videomae.py +34 -31
- transformers/models/videomae/modeling_videomae.py +17 -14
- transformers/models/videomae/video_processing_videomae.py +1 -0
- transformers/models/vilt/configuration_vilt.py +4 -6
- transformers/models/vilt/image_processing_vilt.py +30 -29
- transformers/models/vilt/image_processing_vilt_fast.py +16 -15
- transformers/models/vilt/modeling_vilt.py +90 -116
- transformers/models/vilt/processing_vilt.py +14 -2
- transformers/models/vipllava/configuration_vipllava.py +1 -4
- transformers/models/vipllava/modeling_vipllava.py +70 -99
- transformers/models/vipllava/modular_vipllava.py +54 -78
- transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py +1 -0
- transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +27 -28
- transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py +1 -0
- transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +41 -46
- transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py +16 -2
- transformers/models/visual_bert/configuration_visual_bert.py +2 -6
- transformers/models/visual_bert/modeling_visual_bert.py +92 -98
- transformers/models/vit/configuration_vit.py +1 -0
- transformers/models/vit/image_processing_vit.py +22 -19
- transformers/models/vit/image_processing_vit_fast.py +1 -0
- transformers/models/vit/modeling_vit.py +17 -17
- transformers/models/vit_mae/configuration_vit_mae.py +1 -0
- transformers/models/vit_mae/modeling_vit_mae.py +27 -29
- transformers/models/vit_msn/configuration_vit_msn.py +1 -0
- transformers/models/vit_msn/modeling_vit_msn.py +16 -18
- transformers/models/vitdet/configuration_vitdet.py +1 -0
- transformers/models/vitdet/modeling_vitdet.py +14 -14
- transformers/models/vitmatte/configuration_vitmatte.py +5 -2
- transformers/models/vitmatte/image_processing_vitmatte.py +18 -15
- transformers/models/vitmatte/image_processing_vitmatte_fast.py +18 -16
- transformers/models/vitmatte/modeling_vitmatte.py +11 -14
- transformers/models/vitpose/configuration_vitpose.py +7 -4
- transformers/models/vitpose/image_processing_vitpose.py +25 -24
- transformers/models/vitpose/image_processing_vitpose_fast.py +11 -9
- transformers/models/vitpose/modeling_vitpose.py +14 -14
- transformers/models/vitpose_backbone/configuration_vitpose_backbone.py +1 -0
- transformers/models/vitpose_backbone/modeling_vitpose_backbone.py +10 -8
- transformers/models/vits/configuration_vits.py +1 -4
- transformers/models/vits/modeling_vits.py +42 -44
- transformers/models/vits/tokenization_vits.py +4 -3
- transformers/models/vivit/configuration_vivit.py +1 -0
- transformers/models/vivit/image_processing_vivit.py +39 -36
- transformers/models/vivit/modeling_vivit.py +8 -6
- transformers/models/vjepa2/__init__.py +1 -0
- transformers/models/vjepa2/configuration_vjepa2.py +1 -0
- transformers/models/vjepa2/modeling_vjepa2.py +32 -31
- transformers/models/vjepa2/video_processing_vjepa2.py +1 -0
- transformers/models/voxtral/__init__.py +1 -0
- transformers/models/voxtral/configuration_voxtral.py +2 -0
- transformers/models/voxtral/modeling_voxtral.py +47 -40
- transformers/models/voxtral/modular_voxtral.py +40 -37
- transformers/models/voxtral/processing_voxtral.py +48 -25
- transformers/models/wav2vec2/configuration_wav2vec2.py +2 -4
- transformers/models/wav2vec2/feature_extraction_wav2vec2.py +10 -7
- transformers/models/wav2vec2/modeling_wav2vec2.py +121 -73
- transformers/models/wav2vec2/processing_wav2vec2.py +35 -6
- transformers/models/wav2vec2/tokenization_wav2vec2.py +332 -20
- transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py +2 -4
- transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +62 -70
- transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py +48 -57
- transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py +35 -6
- transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +2 -4
- transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +77 -90
- transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py +30 -37
- transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py +17 -16
- transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py +55 -36
- transformers/models/wavlm/configuration_wavlm.py +2 -4
- transformers/models/wavlm/modeling_wavlm.py +48 -50
- transformers/models/wavlm/modular_wavlm.py +5 -4
- transformers/models/whisper/configuration_whisper.py +5 -6
- transformers/models/whisper/english_normalizer.py +4 -3
- transformers/models/whisper/feature_extraction_whisper.py +24 -9
- transformers/models/whisper/generation_whisper.py +48 -26
- transformers/models/whisper/modeling_whisper.py +73 -79
- transformers/models/whisper/processing_whisper.py +20 -3
- transformers/models/whisper/tokenization_whisper.py +43 -11
- transformers/models/x_clip/configuration_x_clip.py +2 -4
- transformers/models/x_clip/modeling_x_clip.py +93 -96
- transformers/models/x_clip/processing_x_clip.py +14 -2
- transformers/models/xcodec/configuration_xcodec.py +6 -4
- transformers/models/xcodec/modeling_xcodec.py +17 -20
- transformers/models/xglm/configuration_xglm.py +8 -9
- transformers/models/xglm/modeling_xglm.py +55 -60
- transformers/models/xglm/tokenization_xglm.py +11 -3
- transformers/models/xlm/configuration_xlm.py +8 -10
- transformers/models/xlm/modeling_xlm.py +144 -144
- transformers/models/xlm/tokenization_xlm.py +5 -3
- transformers/models/xlm_roberta/configuration_xlm_roberta.py +3 -11
- transformers/models/xlm_roberta/modeling_xlm_roberta.py +194 -195
- transformers/models/xlm_roberta/modular_xlm_roberta.py +53 -50
- transformers/models/xlm_roberta/tokenization_xlm_roberta.py +18 -8
- transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +2 -10
- transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +93 -94
- transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py +70 -67
- transformers/models/xlnet/configuration_xlnet.py +12 -3
- transformers/models/xlnet/modeling_xlnet.py +163 -152
- transformers/models/xlnet/tokenization_xlnet.py +9 -2
- transformers/models/xlstm/configuration_xlstm.py +12 -8
- transformers/models/xlstm/modeling_xlstm.py +65 -62
- transformers/models/xmod/configuration_xmod.py +3 -11
- transformers/models/xmod/modeling_xmod.py +110 -108
- transformers/models/yolos/configuration_yolos.py +1 -0
- transformers/models/yolos/image_processing_yolos.py +62 -60
- transformers/models/yolos/image_processing_yolos_fast.py +45 -42
- transformers/models/yolos/modeling_yolos.py +16 -16
- transformers/models/yolos/modular_yolos.py +19 -17
- transformers/models/yoso/configuration_yoso.py +2 -8
- transformers/models/yoso/modeling_yoso.py +63 -70
- transformers/models/zamba/configuration_zamba.py +8 -5
- transformers/models/zamba/modeling_zamba.py +78 -81
- transformers/models/zamba2/configuration_zamba2.py +50 -44
- transformers/models/zamba2/modeling_zamba2.py +97 -97
- transformers/models/zamba2/modular_zamba2.py +48 -46
- transformers/models/zoedepth/configuration_zoedepth.py +2 -1
- transformers/models/zoedepth/image_processing_zoedepth.py +29 -28
- transformers/models/zoedepth/image_processing_zoedepth_fast.py +24 -21
- transformers/models/zoedepth/modeling_zoedepth.py +18 -26
- transformers/pipelines/__init__.py +114 -57
- transformers/pipelines/any_to_any.py +22 -14
- transformers/pipelines/audio_utils.py +2 -1
- transformers/pipelines/automatic_speech_recognition.py +12 -20
- transformers/pipelines/base.py +27 -15
- transformers/{models/pe_audio/processing_pe_audio.py → pipelines/deprecated/__init__.py} +3 -10
- transformers/pipelines/deprecated/text2text_generation.py +408 -0
- transformers/pipelines/document_question_answering.py +2 -4
- transformers/pipelines/image_text_to_text.py +1 -0
- transformers/pipelines/image_to_text.py +229 -0
- transformers/pipelines/question_answering.py +44 -5
- transformers/pipelines/text_classification.py +14 -1
- transformers/pipelines/text_generation.py +1 -1
- transformers/pipelines/text_to_audio.py +2 -2
- transformers/pipelines/token_classification.py +22 -1
- transformers/pipelines/video_classification.py +9 -1
- transformers/pipelines/zero_shot_audio_classification.py +1 -0
- transformers/pipelines/zero_shot_classification.py +6 -0
- transformers/pipelines/zero_shot_image_classification.py +7 -0
- transformers/processing_utils.py +145 -230
- transformers/quantizers/auto.py +4 -2
- transformers/quantizers/base.py +173 -53
- transformers/quantizers/quantizer_aqlm.py +23 -2
- transformers/quantizers/quantizer_auto_round.py +12 -2
- transformers/quantizers/quantizer_awq.py +89 -20
- transformers/quantizers/quantizer_bitnet.py +14 -4
- transformers/quantizers/quantizer_bnb_4bit.py +155 -18
- transformers/quantizers/quantizer_bnb_8bit.py +110 -24
- transformers/quantizers/quantizer_compressed_tensors.py +9 -2
- transformers/quantizers/quantizer_eetq.py +74 -16
- transformers/quantizers/quantizer_fbgemm_fp8.py +138 -38
- transformers/quantizers/quantizer_finegrained_fp8.py +113 -26
- transformers/quantizers/quantizer_fp_quant.py +82 -52
- transformers/quantizers/quantizer_gptq.py +28 -8
- transformers/quantizers/quantizer_higgs.py +60 -42
- transformers/quantizers/quantizer_hqq.py +153 -144
- transformers/quantizers/quantizer_mxfp4.py +194 -14
- transformers/quantizers/quantizer_quanto.py +79 -35
- transformers/quantizers/quantizer_quark.py +18 -36
- transformers/quantizers/quantizer_spqr.py +12 -4
- transformers/quantizers/quantizer_torchao.py +325 -50
- transformers/quantizers/quantizer_vptq.py +27 -4
- transformers/quantizers/quantizers_utils.py +0 -20
- transformers/safetensors_conversion.py +3 -9
- transformers/testing_utils.py +82 -326
- transformers/tokenization_mistral_common.py +903 -568
- transformers/tokenization_utils_base.py +340 -220
- transformers/tokenization_utils_sentencepiece.py +6 -5
- transformers/tokenization_utils_tokenizers.py +113 -226
- transformers/trainer.py +53 -60
- transformers/trainer_callback.py +0 -8
- transformers/trainer_seq2seq.py +1 -5
- transformers/trainer_utils.py +1 -1
- transformers/training_args.py +41 -77
- transformers/utils/__init__.py +4 -8
- transformers/utils/attention_visualizer.py +5 -5
- transformers/utils/auto_docstring.py +37 -599
- transformers/utils/doc.py +36 -4
- transformers/utils/dummy_pt_objects.py +42 -0
- transformers/utils/generic.py +28 -111
- transformers/utils/hub.py +15 -5
- transformers/utils/import_utils.py +32 -165
- transformers/utils/kernel_config.py +19 -74
- transformers/utils/loading_report.py +15 -25
- transformers/utils/quantization_config.py +241 -72
- transformers/video_processing_utils.py +39 -41
- transformers/video_utils.py +22 -18
- {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/METADATA +236 -284
- transformers-5.0.0rc0.dist-info/RECORD +1987 -0
- {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/WHEEL +1 -1
- transformers/integrations/moe.py +0 -360
- transformers/integrations/quark.py +0 -53
- transformers/loss/loss_lw_detr.py +0 -356
- transformers/models/ernie4_5_vl_moe/__init__.py +0 -31
- transformers/models/ernie4_5_vl_moe/configuration_ernie4_5_vl_moe.py +0 -340
- transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe.py +0 -455
- transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe_fast.py +0 -231
- transformers/models/ernie4_5_vl_moe/modeling_ernie4_5_vl_moe.py +0 -1936
- transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py +0 -1925
- transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py +0 -249
- transformers/models/ernie4_5_vl_moe/video_processing_ernie4_5_vl_moe.py +0 -593
- transformers/models/fast_vlm/__init__.py +0 -27
- transformers/models/fast_vlm/configuration_fast_vlm.py +0 -137
- transformers/models/fast_vlm/modeling_fast_vlm.py +0 -432
- transformers/models/fast_vlm/modular_fast_vlm.py +0 -373
- transformers/models/glm4_moe_lite/__init__.py +0 -28
- transformers/models/glm4_moe_lite/configuration_glm4_moe_lite.py +0 -233
- transformers/models/glm4_moe_lite/modeling_glm4_moe_lite.py +0 -740
- transformers/models/glm4_moe_lite/modular_glm4_moe_lite.py +0 -302
- transformers/models/glm_image/__init__.py +0 -31
- transformers/models/glm_image/configuration_glm_image.py +0 -351
- transformers/models/glm_image/image_processing_glm_image.py +0 -503
- transformers/models/glm_image/image_processing_glm_image_fast.py +0 -294
- transformers/models/glm_image/modeling_glm_image.py +0 -1642
- transformers/models/glm_image/modular_glm_image.py +0 -1531
- transformers/models/glm_image/processing_glm_image.py +0 -217
- transformers/models/glmasr/__init__.py +0 -29
- transformers/models/glmasr/configuration_glmasr.py +0 -196
- transformers/models/glmasr/modeling_glmasr.py +0 -517
- transformers/models/glmasr/modular_glmasr.py +0 -443
- transformers/models/glmasr/processing_glmasr.py +0 -331
- transformers/models/jais2/__init__.py +0 -27
- transformers/models/jais2/configuration_jais2.py +0 -148
- transformers/models/jais2/modeling_jais2.py +0 -484
- transformers/models/jais2/modular_jais2.py +0 -194
- transformers/models/lasr/__init__.py +0 -29
- transformers/models/lasr/configuration_lasr.py +0 -244
- transformers/models/lasr/feature_extraction_lasr.py +0 -275
- transformers/models/lasr/modeling_lasr.py +0 -727
- transformers/models/lasr/modular_lasr.py +0 -574
- transformers/models/lasr/processing_lasr.py +0 -100
- transformers/models/lasr/tokenization_lasr.py +0 -184
- transformers/models/lighton_ocr/__init__.py +0 -28
- transformers/models/lighton_ocr/configuration_lighton_ocr.py +0 -128
- transformers/models/lighton_ocr/modeling_lighton_ocr.py +0 -463
- transformers/models/lighton_ocr/modular_lighton_ocr.py +0 -404
- transformers/models/lighton_ocr/processing_lighton_ocr.py +0 -229
- transformers/models/lw_detr/__init__.py +0 -27
- transformers/models/lw_detr/configuration_lw_detr.py +0 -374
- transformers/models/lw_detr/modeling_lw_detr.py +0 -1702
- transformers/models/lw_detr/modular_lw_detr.py +0 -1615
- transformers/models/minimax_m2/__init__.py +0 -28
- transformers/models/minimax_m2/configuration_minimax_m2.py +0 -188
- transformers/models/minimax_m2/modeling_minimax_m2.py +0 -704
- transformers/models/minimax_m2/modular_minimax_m2.py +0 -346
- transformers/models/paddleocr_vl/__init__.py +0 -31
- transformers/models/paddleocr_vl/configuration_paddleocr_vl.py +0 -335
- transformers/models/paddleocr_vl/image_processing_paddleocr_vl.py +0 -503
- transformers/models/paddleocr_vl/image_processing_paddleocr_vl_fast.py +0 -209
- transformers/models/paddleocr_vl/modeling_paddleocr_vl.py +0 -1683
- transformers/models/paddleocr_vl/modular_paddleocr_vl.py +0 -1380
- transformers/models/paddleocr_vl/processing_paddleocr_vl.py +0 -133
- transformers/models/pe_audio/__init__.py +0 -29
- transformers/models/pe_audio/configuration_pe_audio.py +0 -204
- transformers/models/pe_audio/feature_extraction_pe_audio.py +0 -160
- transformers/models/pe_audio/modeling_pe_audio.py +0 -819
- transformers/models/pe_audio/modular_pe_audio.py +0 -298
- transformers/models/pe_audio_video/__init__.py +0 -28
- transformers/models/pe_audio_video/configuration_pe_audio_video.py +0 -223
- transformers/models/pe_audio_video/modeling_pe_audio_video.py +0 -971
- transformers/models/pe_audio_video/modular_pe_audio_video.py +0 -763
- transformers/models/pe_video/__init__.py +0 -29
- transformers/models/pe_video/configuration_pe_video.py +0 -209
- transformers/models/pe_video/modeling_pe_video.py +0 -647
- transformers/models/pe_video/modular_pe_video.py +0 -231
- transformers/models/pe_video/processing_pe_video.py +0 -10
- transformers/models/pe_video/video_processing_pe_video.py +0 -64
- transformers/models/pixio/__init__.py +0 -29
- transformers/models/pixio/configuration_pixio.py +0 -150
- transformers/models/pixio/modeling_pixio.py +0 -507
- transformers/models/pixio/modular_pixio.py +0 -403
- transformers/models/solar_open/__init__.py +0 -27
- transformers/models/solar_open/configuration_solar_open.py +0 -184
- transformers/models/solar_open/modeling_solar_open.py +0 -642
- transformers/models/solar_open/modular_solar_open.py +0 -224
- transformers/trainer_jit_checkpoint.py +0 -125
- transformers-5.0.0.dist-info/RECORD +0 -2068
- {transformers-5.0.0.dist-info/licenses → transformers-5.0.0rc0.dist-info}/LICENSE +0 -0
- {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/entry_points.txt +0 -0
- {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/top_level.txt +0 -0
@@ -1,3 +1,4 @@
+# coding=utf-8
 # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
 #
 #
@@ -17,7 +18,7 @@
 import math
 from collections.abc import Callable
 from dataclasses import dataclass
-from typing import Any, Optional
+from typing import Any, Optional, Union
 
 import numpy as np
 import torch
@@ -25,40 +26,36 @@ import torch.nn.functional as F
 from torch import nn
 from torch.nn import Parameter
 
-from
+from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding, rotate_half
+from transformers.models.qwen2_5_vl.configuration_qwen2_5_vl import Qwen2_5_VLVisionConfig
+from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import (
+    Qwen2_5_VisionTransformerPretrainedModel,
+    Qwen2_5_VLAttention,
+    Qwen2_5_VLMLP,
+    Qwen2_5_VLPreTrainedModel,
+    Qwen2_5_VLTextModel,
+    Qwen2_5_VLVisionBlock,
+    eager_attention_forward,
+)
+from transformers.models.qwen2_audio.configuration_qwen2_audio import Qwen2AudioEncoderConfig
+from transformers.models.qwen2_audio.modeling_qwen2_audio import Qwen2AudioEncoderLayer
+from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLRotaryEmbedding
+
 from ...cache_utils import Cache
 from ...configuration_utils import PreTrainedConfig, layer_type_validation
 from ...generation import GenerationMixin
-from ...modeling_outputs import
+from ...modeling_outputs import BaseModelOutput, ModelOutput
 from ...modeling_rope_utils import RopeParameters
-from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
 from ...processing_utils import Unpack
 from ...utils import (
     TransformersKwargs,
     auto_docstring,
-    can_return_tuple,
     check_torch_load_is_safe,
     logging,
-    torch_compilable_check,
 )
 from ...utils.deprecation import deprecate_kwarg
-from ...utils.generic import check_model_inputs, is_flash_attention_requested
 from ...utils.hub import cached_file
-from ..llama.modeling_llama import LlamaRotaryEmbedding, rotate_half
-from ..qwen2_5_vl.configuration_qwen2_5_vl import Qwen2_5_VLVisionConfig
-from ..qwen2_5_vl.modeling_qwen2_5_vl import (
-    Qwen2_5_VisionRotaryEmbedding,
-    Qwen2_5_VisionTransformerPretrainedModel,
-    Qwen2_5_VLAttention,
-    Qwen2_5_VLMLP,
-    Qwen2_5_VLPreTrainedModel,
-    Qwen2_5_VLTextModel,
-    Qwen2_5_VLVisionBlock,
-    eager_attention_forward,
-)
-from ..qwen2_audio.configuration_qwen2_audio import Qwen2AudioEncoderConfig
-from ..qwen2_audio.modeling_qwen2_audio import Qwen2AudioEncoderLayer
-from ..qwen2_vl.modeling_qwen2_vl import Qwen2VLRotaryEmbedding
 
 
 logger = logging.get_logger(__name__)
@@ -348,26 +345,24 @@ class Qwen2_5OmniTextConfig(PreTrainedConfig):
 
     def __init__(
         self,
-        vocab_size: int
-        hidden_size: int
-        intermediate_size: int
-        num_hidden_layers: int
-        num_attention_heads: int
-        num_key_value_heads: int
-        hidden_act: str
-        max_position_embeddings: int
-        initializer_range: float
-        rms_norm_eps: int
-        use_cache: bool
-
-
-
-
-
-
-
-        bos_token_id: int | None = None,
-        eos_token_id: int | None = None,
+        vocab_size: Optional[int] = 152064,
+        hidden_size: Optional[int] = 3584,
+        intermediate_size: Optional[int] = 18944,
+        num_hidden_layers: Optional[int] = 28,
+        num_attention_heads: Optional[int] = 28,
+        num_key_value_heads: Optional[int] = 4,
+        hidden_act: Optional[str] = "silu",
+        max_position_embeddings: Optional[int] = 32768,
+        initializer_range: Optional[float] = 0.02,
+        rms_norm_eps: Optional[int] = 1e-6,
+        use_cache: Optional[bool] = True,
+        tie_word_embeddings: Optional[bool] = False,
+        rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
+        use_sliding_window: Optional[bool] = False,
+        sliding_window: Optional[int] = 32768,
+        max_window_layers: Optional[int] = 28,
+        layer_types: Optional[list[str]] = None,
+        attention_dropout: Optional[float] = 0.0,
         **kwargs,
     ):
         self.vocab_size = vocab_size
@@ -379,9 +374,6 @@ class Qwen2_5OmniTextConfig(PreTrainedConfig):
         self.use_sliding_window = use_sliding_window
         self.sliding_window = sliding_window if self.use_sliding_window else None
         self.max_window_layers = max_window_layers
-        self.pad_token_id = pad_token_id
-        self.bos_token_id = bos_token_id
-        self.eos_token_id = eos_token_id
 
         # for backward compatibility
         if num_key_value_heads is None:
@@ -406,7 +398,8 @@ class Qwen2_5OmniTextConfig(PreTrainedConfig):
 
         self.rope_parameters = rope_parameters
         super().__init__(
-
+            tie_word_embeddings=tie_word_embeddings,
+            ignore_keys_at_rope_validation={"mrope"},
             **kwargs,
         )
 
@@ -447,8 +440,6 @@ class Qwen2_5OmniThinkerConfig(PreTrainedConfig):
             The user token index to encode the user token.
         initializer_range (`float`, *optional*, defaults to 0.02):
             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
-            Whether the model's input and output word embeddings should be tied.
 
     Example:
 
@@ -500,7 +491,6 @@ class Qwen2_5OmniThinkerConfig(PreTrainedConfig):
         audio_end_token_id=151648,
         user_token_id=872,
         initializer_range=0.02,
-        tie_word_embeddings=False,
         **kwargs,
     ):
         self.audio_token_index = audio_token_index
@@ -512,7 +502,6 @@ class Qwen2_5OmniThinkerConfig(PreTrainedConfig):
         self.audio_start_token_id = audio_start_token_id
         self.audio_end_token_id = audio_end_token_id
         self.initializer_range = initializer_range
-        self.tie_word_embeddings = tie_word_embeddings
 
         if isinstance(vision_config, dict):
             vision_config = Qwen2_5OmniVisionEncoderConfig(**vision_config)
@@ -692,7 +681,7 @@ class Qwen2_5OmniTalkerConfig(PreTrainedConfig):
         sliding_window=32768,
         max_window_layers=28,
         attention_dropout=0.0,
-        rope_parameters: RopeParameters | dict[str, RopeParameters]
+        rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
         position_id_per_seconds=25,
         seconds_per_chunk=2,
         audio_start_token_id=151647,
@@ -746,7 +735,6 @@ class Qwen2_5OmniTalkerConfig(PreTrainedConfig):
 
         self.initializer_range = initializer_range
         self.spatial_merge_size = spatial_merge_size
-        self.tie_word_embeddings = tie_word_embeddings
 
         self.layer_types = layer_types
         if self.layer_types is None:
@@ -759,7 +747,7 @@ class Qwen2_5OmniTalkerConfig(PreTrainedConfig):
         layer_type_validation(self.layer_types, self.num_hidden_layers)
 
         self.rope_parameters = rope_parameters
-        super().__init__(ignore_keys_at_rope_validation={"mrope"}, **kwargs)
+        super().__init__(tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope"}, **kwargs)
 
 
 class Qwen2_5OmniDiTConfig(PreTrainedConfig):
@@ -817,7 +805,7 @@ class Qwen2_5OmniDiTConfig(PreTrainedConfig):
         ff_mult=2,
         emb_dim=512,
         head_dim=64,
-        rope_parameters: RopeParameters | dict[str, RopeParameters]
+        rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
         max_position_embeddings=32768,
         block_size=24,
         look_ahead_layers=[10],
@@ -1064,23 +1052,6 @@ class Qwen2_5OmniPreTrainedModel(Qwen2_5_VLPreTrainedModel):
     input_modalities = ("image", "video", "audio", "text")
     _can_compile_fullgraph = False
 
-    def _init_weights(self, module):
-        PreTrainedModel._init_weights(self, module)
-        if isinstance(module, SinusoidsPositionEmbedding):
-            log_timescale_increment = np.log(module.max_timescale) / (module.channels // 2 - 1)
-            inv_timescales = torch.exp(-log_timescale_increment * torch.arange(module.channels // 2).float())
-            scaled_time = torch.arange(module.length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
-            init.copy_(module.positional_embedding, torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1))
-        elif isinstance(module, UpSample1d):
-            filter_tensor = kaiser_sinc_filter1d(0.5 / module.ratio, 0.6 / module.ratio, module.kernel_size)
-            init.copy_(module.filter, filter_tensor)
-        elif isinstance(module, DownSample1d):
-            filter_tensor = kaiser_sinc_filter1d(module.cutoff, module.half_width, module.kernel_size)
-            init.copy_(module.filter, filter_tensor)
-        elif isinstance(module, Qwen2_5_VisionRotaryEmbedding):
-            inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
-            init.copy_(module.inv_freq, inv_freq)
-
 
 class Qwen2_5OmniPreTrainedModelForConditionalGeneration(Qwen2_5OmniPreTrainedModel):
     input_modalities = ("image", "video", "audio", "text")
@@ -1199,13 +1170,13 @@ class Qwen2_5OmniPreTrainedModelForConditionalGeneration(Qwen2_5OmniPreTrainedMo
 
     def get_rope_index(
         self,
-        input_ids: torch.LongTensor
-        image_grid_thw: torch.LongTensor
-        video_grid_thw: torch.LongTensor
-        attention_mask: torch.Tensor
+        input_ids: Optional[torch.LongTensor] = None,
+        image_grid_thw: Optional[torch.LongTensor] = None,
+        video_grid_thw: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
         use_audio_in_video: bool = False,
-        audio_seqlens: torch.LongTensor
-        second_per_grids: torch.Tensor
+        audio_seqlens: Optional[torch.LongTensor] = None,
+        second_per_grids: Optional[torch.Tensor] = None,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         """
         Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
@@ -1513,12 +1484,12 @@ class Qwen2_5OmniThinkerCausalLMOutputWithPast(ModelOutput):
         The rope index difference between sequence length and multimodal rope.
     """
 
-    loss: torch.FloatTensor
-    logits: torch.FloatTensor
-    past_key_values: Cache
-    hidden_states: tuple[torch.FloatTensor]
-    attentions: tuple[torch.FloatTensor]
-    rope_deltas: torch.LongTensor
+    loss: Optional[torch.FloatTensor] = None
+    logits: Optional[torch.FloatTensor] = None
+    past_key_values: Optional[Cache] = None
+    hidden_states: Optional[tuple[torch.FloatTensor]] = None
+    attentions: Optional[tuple[torch.FloatTensor]] = None
+    rope_deltas: Optional[torch.LongTensor] = None
 
 
 class Qwen2_5OmniAudioAttention(nn.Module):
@@ -1554,10 +1525,10 @@ class Qwen2_5OmniAudioAttention(nn.Module):
     def forward(
         self,
         hidden_states: torch.Tensor,
-        cu_seqlens: torch.Tensor
-        attention_mask: torch.Tensor
+        cu_seqlens: Optional[torch.Tensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
         **kwargs,
-    ) -> tuple[torch.Tensor, torch.Tensor
+    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
         """Input shape: Batch x Time x Channel"""
 
         seq_length, _ = hidden_states.size()
@@ -1606,7 +1577,7 @@ class Qwen2_5OmniAudioEncoderLayer(Qwen2AudioEncoderLayer):
         self,
         hidden_states: torch.Tensor,
         cu_seqlens: torch.Tensor,
-        attention_mask: torch.Tensor
+        attention_mask: Optional[torch.Tensor] = None,
         **kwargs,
     ) -> torch.Tensor:
         residual = hidden_states
@@ -1637,9 +1608,6 @@ class Qwen2_5OmniAudioEncoderLayer(Qwen2AudioEncoderLayer):
 class SinusoidsPositionEmbedding(nn.Module):
     def __init__(self, length, channels, max_timescale=10000):
         super().__init__()
-        self.length = length
-        self.channels = channels
-        self.max_timescale = max_timescale
         if channels % 2 != 0:
             raise ValueError("SinusoidsPositionEmbedding needs even channels input")
         log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
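For readers following the `log_timescale_increment` line above: this layer builds the standard Whisper-style sinusoid table. A minimal sketch of the same computation (the free function name `sinusoids` is ours, not the module's):

```python
import numpy as np
import torch

def sinusoids(length: int, channels: int, max_timescale: float = 10000) -> torch.Tensor:
    # One geometric timescale per channel pair; channels must be even.
    log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
    inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2))
    scaled_time = torch.arange(length)[:, None] * inv_timescales[None, :]
    # sin half then cos half -> (length, channels)
    return torch.cat([scaled_time.sin(), scaled_time.cos()], dim=1)

print(sinusoids(8, 4).shape)  # torch.Size([8, 4])
```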
@@ -1667,10 +1635,6 @@ class Qwen2_5OmniAudioEncoder(Qwen2_5OmniPreTrainedModel):
     input_modalities = "audio"
     _no_split_modules = ["Qwen2_5OmniAudioEncoderLayer"]
     _supports_sdpa = True
-    _can_record_outputs = {
-        "hidden_states": Qwen2_5OmniAudioEncoderLayer,
-        "attentions": Qwen2_5OmniAudioAttention,
-    }
 
     def __init__(self, config: Qwen2_5OmniAudioEncoderConfig):
         super().__init__(config)
@@ -1709,7 +1673,7 @@ class Qwen2_5OmniAudioEncoder(Qwen2_5OmniPreTrainedModel):
         # NOTE: the created attention mask only approximates the ragged FA2 attention by
         # allowing bidirectional attention within `cu_seqlens` blocks, and not attending between
         # blocks. Though it will not be a 100% match for FA2's `varlen` path
-        if
+        if self.config._attn_implementation == "flash_attention_2":
             return None
 
         seq_length = inputs_tensor.shape[0]
@@ -1723,9 +1687,14 @@ class Qwen2_5OmniAudioEncoder(Qwen2_5OmniPreTrainedModel):
             attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0
         return attention_mask
 
-    @check_model_inputs(tie_last_hidden_states=False)
     @auto_docstring
-    def forward(
+    def forward(
+        self,
+        input_features,
+        feature_lens=None,
+        aftercnn_lens=None,
+        **kwargs,
+    ):
         r"""
         feature_lens (`torch.LongTensor` of shape `(batch_size,)`):
             mel length
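The hunk above keeps FA2's true `varlen` path (returning `None`) and, for other attention backends, emulates it with a block-diagonal additive bias built from `cu_seqlens`, so tokens only attend within their own audio segment. A self-contained sketch of that construction (the function name is illustrative):

```python
import torch

def block_diagonal_bias(cu_seqlens: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    # cu_seqlens: cumulative segment boundaries, e.g. tensor([0, 3, 7])
    seq_length = int(cu_seqlens[-1])
    # Start fully masked (large negative bias), then open each segment's block.
    mask = torch.full((1, seq_length, seq_length), torch.finfo(dtype).min, dtype=dtype)
    for i in range(1, len(cu_seqlens)):
        start, end = cu_seqlens[i - 1], cu_seqlens[i]
        mask[..., start:end, start:end] = 0
    return mask

# Two segments of lengths 3 and 4: attention never crosses the boundary.
print(block_diagonal_bias(torch.tensor([0, 3, 7])).shape)  # torch.Size([1, 7, 7])
```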
@@ -1734,7 +1703,11 @@ class Qwen2_5OmniAudioEncoder(Qwen2_5OmniPreTrainedModel):
         """
         chunk_num = torch.ceil(feature_lens / (self.n_window * 2)).long()
 
-        chunk_lengths = torch.tensor([self.n_window * 2] * chunk_num.sum(), dtype=torch.long, device=feature_lens.device)
+        chunk_lengths = torch.tensor(
+            [self.n_window * 2] * chunk_num.sum(),
+            dtype=torch.long,
+            device=feature_lens.device,
+        )
         tail_chunk_index = F.pad(chunk_num, (1, 0), value=-1).cumsum(0)[1:]
         chunk_lengths[tail_chunk_index] = feature_lens % (self.n_window * 2)
         chunk_lengths = torch.where(chunk_lengths == 0, self.n_window * 2, chunk_lengths)
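To see what this chunking computes, here is a worked example with a hypothetical `n_window = 50`, so one chunk covers 100 mel frames:

```python
import torch
import torch.nn.functional as F

n_window = 50  # hypothetical; one chunk is n_window * 2 = 100 frames
feature_lens = torch.tensor([230, 100])  # two audios: 230 and 100 mel frames
chunk_num = torch.ceil(feature_lens / (n_window * 2)).long()  # tensor([3, 1])

# Every chunk starts at full size...
chunk_lengths = torch.tensor([n_window * 2] * chunk_num.sum())  # [100, 100, 100, 100]
# ...then each audio's last chunk is shortened to the remainder (0 means it fit exactly).
tail_chunk_index = F.pad(chunk_num, (1, 0), value=-1).cumsum(0)[1:]  # tensor([2, 3])
chunk_lengths[tail_chunk_index] = feature_lens % (n_window * 2)      # [100, 100, 30, 0]
chunk_lengths = torch.where(chunk_lengths == 0, n_window * 2, chunk_lengths)
print(chunk_lengths)  # tensor([100, 100,  30, 100])
```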
@@ -1775,7 +1748,7 @@ class Qwen2_5OmniAudioEncoder(Qwen2_5OmniPreTrainedModel):
             each_audio_states = self.proj(each_audio_states)
             token_audio_list.append(each_audio_states)
         token_audio = torch.cat(token_audio_list, dim=0)
-        return
+        return BaseModelOutput(last_hidden_state=token_audio)
 
     def padded_and_mask_function(self, tensor_list, tensor_len, padding_value=0, padding_side="right"):
         """
@@ -1857,7 +1830,7 @@ class Qwen2_5OmniVisionAttention(nn.Module):
         self,
         hidden_states: torch.Tensor,
         cu_seqlens: torch.Tensor,
-        rotary_pos_emb: torch.Tensor
+        rotary_pos_emb: Optional[torch.Tensor] = None,
         **kwargs,
     ) -> torch.Tensor:
         seq_length = hidden_states.shape[0]
@@ -1875,7 +1848,7 @@ class Qwen2_5OmniVisionAttention(nn.Module):
         if self.config._attn_implementation != "eager":
             attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
 
-            if
+            if self.config._attn_implementation == "flash_attention_2":
                 # Flash Attention 2: Use cu_seqlens for variable length attention
                 max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
                 attn_output, _ = attention_interface(
@@ -1930,7 +1903,7 @@ class Qwen2_5OmniVisionBlock(Qwen2_5_VLVisionBlock):
         self,
         hidden_states: torch.Tensor,
         cu_seqlens: torch.Tensor,
-        rotary_pos_emb: torch.Tensor
+        rotary_pos_emb: Optional[torch.Tensor] = None,
         **kwargs,
     ) -> torch.Tensor:
         hidden_states = hidden_states + self.attn(
@@ -1943,28 +1916,17 @@ class Qwen2_5OmniVisionBlock(Qwen2_5_VLVisionBlock):
         return hidden_states
 
 
-class Qwen2_5_VisionRotaryEmbedding(Qwen2_5_VisionRotaryEmbedding):
-    pass
-
-
 class Qwen2_5OmniVisionEncoder(Qwen2_5_VisionTransformerPretrainedModel):
     config: Qwen2_5OmniVisionEncoderConfig
     input_modalities = ("image", "video")
     _no_split_modules = ["Qwen2_5OmniVisionBlock"]
     _input_embed_layer = "patch_embed"
-    _can_record_outputs = {
-        "hidden_states": Qwen2_5OmniVisionBlock,
-        "attentions": Qwen2_5OmniVisionAttention,
-    }
 
     def __init__(self, config: Qwen2_5OmniVisionEncoderConfig, *inputs, **kwargs) -> None:
         super().__init__(config, *inputs, **kwargs)
         self.blocks = nn.ModuleList([Qwen2_5OmniVisionBlock(config) for _ in range(config.depth)])
 
-
-    def forward(
-        self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
-    ) -> tuple | BaseModelOutputWithPooling:
+    def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
         """
         Args:
             hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
@@ -2017,15 +1979,11 @@ class Qwen2_5OmniVisionEncoder(Qwen2_5_VisionTransformerPretrainedModel):
                 rotary_pos_emb=rotary_pos_emb,
                 **kwargs,
             )
-
-        merged_hidden_states = self.merger(hidden_states)
+        hidden_states = self.merger(hidden_states)
         reverse_indices = torch.argsort(window_index)
-
+        hidden_states = hidden_states[reverse_indices, :]
 
-        return BaseModelOutputWithPooling(
-            last_hidden_state=hidden_states,
-            pooler_output=merged_hidden_states,
-        )
+        return hidden_states
 
 
 class Qwen2_5OmniRotaryEmbedding(Qwen2VLRotaryEmbedding):
@@ -2036,7 +1994,7 @@ class Qwen2_5OmniRotaryEmbedding(Qwen2VLRotaryEmbedding):
 # It's the same as `Qwen2_5_VLAttention`, but the talker model's hidden_size isn't divisible by num_heads.
 # Removes the value error as a workaround.
 class Qwen2_5OmniAttention(Qwen2_5_VLAttention):
-    def __init__(self, config: Qwen2_5OmniConfig, layer_idx: int):
+    def __init__(self, config: Qwen2_5OmniConfig, layer_idx: Optional[int] = None):
         nn.Module.__init__(self)
         self.config = config
         self.layer_idx = layer_idx
@@ -2095,6 +2053,7 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
         self.vocab_size = config.text_config.vocab_size
         self.model = Qwen2_5OmniThinkerTextModel._from_config(config.text_config)
         self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
+        self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
         self.spatial_merge_size = config.vision_config.spatial_merge_size
         self.rope_deltas = None
         self.post_init()
@@ -2105,56 +2064,52 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
     def set_input_embeddings(self, value):
         self.model.set_input_embeddings(value)
 
-    @can_return_tuple
-    @auto_docstring
     def get_video_features(
-        self,
-
-
-
-
-
-
-
-
-
+        self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
+    ):
+        """
+        Encodes videos into continuous embeddings that can be forwarded to the language model.
+
+        Args:
+            pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+                The tensors corresponding to the input videos.
+            video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+                The temporal, height and width of feature shape of each video in LLM.
         """
         pixel_values_videos = pixel_values_videos.type(self.visual.dtype)
-
+        video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)
+        return video_embeds
 
-
-
-
-
-
-
-
-
-
-        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
-            The tensors corresponding to the input images.
-        image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
-            The temporal, height and width of feature shape of each image in LLM.
+    def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
+        """
+        Encodes images into continuous embeddings that can be forwarded to the language model.
+
+        Args:
+            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+                The tensors corresponding to the input images.
+            image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+                The temporal, height and width of feature shape of each image in LLM.
         """
         pixel_values = pixel_values.type(self.visual.dtype)
-
+        image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
+        return image_embeds
 
-    @can_return_tuple
-    @auto_docstring
     def get_audio_features(
         self,
         input_features: torch.FloatTensor,
-        feature_attention_mask: torch.LongTensor
-        audio_feature_lengths: torch.LongTensor
-
-
-
-
-
-
-
-
-
+        feature_attention_mask: Optional[torch.LongTensor] = None,
+        audio_feature_lengths: Optional[torch.LongTensor] = None,
+    ):
+        """
+        Encodes audios into continuous embeddings that can be forwarded to the language model.
+
+        Args:
+            input_features (`torch.FloatTensor`):
+                The tensors corresponding to the input audios.
+            feature_attention_mask (`torch.LongTensor`, *optional*):
+                Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
+            audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
+                The length of feature shape of each audio in LLM.
         """
         if feature_attention_mask is not None:
            audio_feature_lengths = torch.sum(feature_attention_mask, dim=1)
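With the rc0 signatures, the three helpers return plain tensors. A hedged usage sketch follows; the checkpoint name and the exact processor outputs (`pixel_values`, `image_grid_thw`) are assumptions based on Qwen2-VL-style processors, not verified against this build:

```python
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5OmniThinkerForConditionalGeneration

# Checkpoint name is illustrative.
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")
model = Qwen2_5OmniThinkerForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-Omni-7B")

image = Image.new("RGB", (448, 448))
inputs = processor(text="Describe this image.", images=[image], return_tensors="pt")
with torch.no_grad():
    image_embeds = model.get_image_features(
        inputs["pixel_values"], image_grid_thw=inputs["image_grid_thw"]
    )  # -> (num_image_tokens, hidden_size) tensor, no ModelOutput wrapper
```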
@@ -2170,20 +2125,20 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
             input_features,
             feature_lens=feature_lens,
             aftercnn_lens=audio_feat_lengths,
-            return_dict=True,
-            **kwargs,
         )
-
+        audio_features = audio_outputs.last_hidden_state
+
+        if audio_features.shape[0] != sum(audio_output_lengths.tolist()):
             raise ValueError("length of audio_features should match audio_output_lengths")
 
-        return
+        return audio_features
 
     def get_placeholder_mask(
         self,
         input_ids: torch.LongTensor,
         inputs_embeds: torch.FloatTensor,
-        image_features: torch.FloatTensor
-        video_features: torch.FloatTensor
+        image_features: Optional[torch.FloatTensor] = None,
+        video_features: Optional[torch.FloatTensor] = None,
     ):
         """
         Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
@@ -2211,18 +2166,16 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
 
         n_image_tokens = special_image_mask.sum()
         special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
-        if image_features is not None:
-
-
-                f"Image features and image tokens do not match, tokens: {n_image_tokens}, features: {image_features.shape[0]}",
+        if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
+            raise ValueError(
+                f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
             )
 
         n_video_tokens = special_video_mask.sum()
         special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
-        if video_features is not None:
-
-
-                f"Video features and video tokens do not match, tokens: {n_video_tokens}, features: {video_features.shape[0]}",
+        if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
+            raise ValueError(
+                f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
             )
 
         special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
@@ -2231,29 +2184,29 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
     @auto_docstring
     def forward(
         self,
-        input_ids: torch.LongTensor
-        input_features: torch.FloatTensor
-        pixel_values: torch.FloatTensor
-        pixel_values_videos: torch.FloatTensor
-        image_grid_thw: torch.LongTensor
-        video_grid_thw: torch.LongTensor
-        attention_mask: torch.Tensor
-        feature_attention_mask: torch.Tensor
-        audio_feature_lengths: torch.LongTensor
-        position_ids: torch.LongTensor
-        past_key_values: Cache
-        inputs_embeds: torch.FloatTensor
-        rope_deltas: torch.LongTensor
-        labels: torch.LongTensor
-        use_cache: bool
-        output_attentions: bool
-        output_hidden_states: bool
-        return_dict: bool
-        use_audio_in_video: bool
-        cache_position: torch.LongTensor
-        video_second_per_grid: torch.LongTensor
+        input_ids: Optional[torch.LongTensor] = None,
+        input_features: Optional[torch.FloatTensor] = None,
+        pixel_values: Optional[torch.FloatTensor] = None,
+        pixel_values_videos: Optional[torch.FloatTensor] = None,
+        image_grid_thw: Optional[torch.LongTensor] = None,
+        video_grid_thw: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        feature_attention_mask: Optional[torch.Tensor] = None,
+        audio_feature_lengths: Optional[torch.LongTensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Cache] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        rope_deltas: Optional[torch.LongTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+        use_audio_in_video: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        video_second_per_grid: Optional[torch.LongTensor] = None,
         **kwargs: Unpack[TransformersKwargs],
-    ) -> tuple | Qwen2_5OmniThinkerCausalLMOutputWithPast:
+    ) -> Union[tuple, Qwen2_5OmniThinkerCausalLMOutputWithPast]:
         r"""
         image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
             The temporal, height and width of feature shape of each image in LLM.
@@ -2326,14 +2279,13 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
                 input_features,
                 feature_attention_mask=feature_attention_mask,
                 audio_feature_lengths=audio_feature_lengths,
-
-            ).last_hidden_state
+            )
             audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype)
             _, _, audio_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
             inputs_embeds = inputs_embeds.masked_scatter(audio_mask, audio_features)
 
         if pixel_values is not None:
-            image_embeds = self.get_image_features(pixel_values, image_grid_thw
+            image_embeds = self.get_image_features(pixel_values, image_grid_thw)
             image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
             image_mask, _, _ = self.get_placeholder_mask(
                 input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
@@ -2341,7 +2293,7 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
             inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
 
         if pixel_values_videos is not None:
-            video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw
+            video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
             video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
             _, video_mask, _ = self.get_placeholder_mask(
                 input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
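All three modalities above use the same `masked_scatter` trick: placeholder token positions in `inputs_embeds` are overwritten, row for row, by the encoder outputs. A toy sketch (the placeholder id is illustrative):

```python
import torch

hidden_size = 4
placeholder_id = 151655  # illustrative image placeholder token id
input_ids = torch.tensor([[1, placeholder_id, placeholder_id, 2]])
inputs_embeds = torch.zeros(1, 4, hidden_size)
image_embeds = torch.ones(2, hidden_size)  # one row per placeholder token

# Boolean mask over embedding positions, broadcast to hidden_size...
mask = (input_ids == placeholder_id).unsqueeze(-1).expand_as(inputs_embeds)
# ...then scatter the image rows into exactly the placeholder slots.
inputs_embeds = inputs_embeds.masked_scatter(mask, image_embeds)
print(inputs_embeds[0, 1])  # tensor([1., 1., 1., 1.])
```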
@@ -2354,8 +2306,11 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
             audio_feature_lengths = None
 
         if attention_mask is not None and position_ids is None:
-
-
+            if (
+                cache_position is None
+                or (cache_position is not None and cache_position[0] == 0)
+                or self.rope_deltas is None
+            ):
                 delta0 = (1 - attention_mask).sum(dim=-1).unsqueeze(1)
                 position_ids, rope_deltas = self.get_rope_index(
                     input_ids,
@@ -2370,7 +2325,7 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
                 self.rope_deltas = rope_deltas
             else:
                 batch_size, seq_length = input_ids.shape
-                delta =
+                delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0
                 position_ids = torch.arange(seq_length, device=input_ids.device)
                 position_ids = position_ids.view(1, -1).expand(batch_size, -1)
                 position_ids = position_ids.add(delta)
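The cached-`rope_deltas` branch above is easiest to see with numbers. During decode, each new token's position id is the cache offset plus the multimodal delta saved at prefill; a hedged sketch:

```python
import torch

# Suppose prefill computed multimodal positions ending at 57 for a 64-token prompt:
# rope_deltas stores (max_position + 1 - seq_len) = 57 + 1 - 64 = -6.
rope_deltas = torch.tensor([[-6]])
cache_position = torch.tensor([64])  # first decode step

batch_size, seq_length = 1, 1
delta = cache_position[0] + rope_deltas  # 64 + (-6) = 58 -> next position id
position_ids = torch.arange(seq_length).view(1, -1).expand(batch_size, -1)
position_ids = position_ids.add(delta)
print(position_ids)  # tensor([[58]])
```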
@@ -2428,7 +2383,6 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
         feature_attention_mask=None,
         use_audio_in_video=False,
         video_second_per_grid=None,
-        is_first_iteration=False,
         **kwargs,
     ):
         model_inputs = super().prepare_inputs_for_generation(
@@ -2447,13 +2401,12 @@ class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCo
             feature_attention_mask=feature_attention_mask,
             use_audio_in_video=use_audio_in_video,
             video_second_per_grid=video_second_per_grid,
-            is_first_iteration=is_first_iteration,
             **kwargs,
         )
 
         model_inputs["position_ids"] = None
 
-        if
+        if cache_position[0] != 0:
             model_inputs["pixel_values"] = None
             model_inputs["pixel_values_videos"] = None
             model_inputs["input_features"] = None
@@ -2490,13 +2443,13 @@ class Qwen2_5OmniTalkerCausalLMOutputWithPast(ModelOutput):
             response that the talker model will use to generate speech tokens.
     """
 
-    loss: torch.FloatTensor
-    logits: torch.FloatTensor
-    past_key_values: Cache
-    hidden_states: tuple[torch.FloatTensor]
-    attentions: tuple[torch.FloatTensor]
-    rope_deltas: torch.LongTensor
-    thinker_reply_part: torch.FloatTensor
+    loss: Optional[torch.FloatTensor] = None
+    logits: Optional[torch.FloatTensor] = None
+    past_key_values: Optional[Cache] = None
+    hidden_states: Optional[tuple[torch.FloatTensor]] = None
+    attentions: Optional[tuple[torch.FloatTensor]] = None
+    rope_deltas: Optional[torch.LongTensor] = None
+    thinker_reply_part: Optional[torch.FloatTensor] = None
 
 
 class Qwen2_5OmniTalkerModel(Qwen2_5_VLTextModel):
@@ -2547,26 +2500,25 @@ class Qwen2_5OmniTalkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCon
     @auto_docstring
     def forward(
         self,
-        input_ids: torch.LongTensor
-        attention_mask: torch.Tensor
-        position_ids: torch.LongTensor
-        past_key_values: Cache
-        thinker_reply_part: torch.FloatTensor
-        inputs_embeds: torch.FloatTensor
-        rope_deltas: torch.LongTensor
-        use_cache: bool
-        cache_position: torch.LongTensor
-        input_text_ids: torch.LongTensor
-        image_grid_thw: torch.LongTensor
-        video_grid_thw: torch.LongTensor
-        use_audio_in_video: bool
-        audio_feature_lengths: torch.LongTensor
-        video_second_per_grid: torch.LongTensor
-        output_attentions: bool
-        output_hidden_states: bool
-        return_dict: bool
-
-    ) -> tuple | Qwen2_5OmniTalkerCausalLMOutputWithPast:
+        input_ids: Optional[torch.LongTensor] = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[Cache] = None,
+        thinker_reply_part: Optional[torch.FloatTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        rope_deltas: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+        input_text_ids: Optional[torch.LongTensor] = None,
+        image_grid_thw: Optional[torch.LongTensor] = None,
+        video_grid_thw: Optional[torch.LongTensor] = None,
+        use_audio_in_video: Optional[bool] = None,
+        audio_feature_lengths: Optional[torch.LongTensor] = None,
+        video_second_per_grid: Optional[torch.LongTensor] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[tuple, Qwen2_5OmniTalkerCausalLMOutputWithPast]:
         r"""
         thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
             Hidden states from the thinker model's output that represent the text reply part to be processed.
@@ -2615,8 +2567,11 @@ class Qwen2_5OmniTalkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCon
         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
 
         if attention_mask is not None and position_ids is None:
-
-
+            if (
+                cache_position is None
+                or (cache_position is not None and cache_position[0] == 0)
+                or self.rope_deltas is None
+            ):
                 position_ids, rope_deltas = self.get_rope_index(
                     input_text_ids,
                     image_grid_thw,
@@ -2636,12 +2591,8 @@ class Qwen2_5OmniTalkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForCon
                 self.rope_deltas = rope_deltas
 
             else:
-
-
-            else:
-                batch_size, seq_length = input_ids.shape
-
-                delta = (past_key_values_length + self.rope_deltas).to(input_ids.device)
+                batch_size, seq_length = input_ids.shape
+                delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0
                 position_ids = torch.arange(seq_length, device=input_ids.device)
                 position_ids = position_ids.view(1, -1).expand(batch_size, -1)
                 position_ids = position_ids.add(delta)
@@ -2769,9 +2720,9 @@ class Qwen2_5OmniDiTRotaryEmbedding(LlamaRotaryEmbedding):
 
     @staticmethod
    def compute_default_rope_parameters(
-        config: Qwen2_5OmniDiTConfig
+        config: Optional[Qwen2_5OmniDiTConfig] = None,
         device: Optional["torch.device"] = None,
-        seq_len: int
+        seq_len: Optional[int] = None,
     ) -> tuple["torch.Tensor", float]:
         return super().compute_default_rope_parameters(
             config,
@@ -2781,7 +2732,7 @@ class Qwen2_5OmniDiTRotaryEmbedding(LlamaRotaryEmbedding):
 
 
 # Modified from Llama with a different rotate function, will be fixed in the next release
-def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
     """Applies Rotary Position Embedding to the query and key tensors.
 
     Args:
@@ -2789,6 +2740,8 @@ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
         k (`torch.Tensor`): The key tensor.
         cos (`torch.Tensor`): The cosine part of the rotary embedding.
         sin (`torch.Tensor`): The sine part of the rotary embedding.
+        position_ids (`torch.Tensor`, *optional*):
+            Deprecated and unused.
         unsqueeze_dim (`int`, *optional*, defaults to 1):
             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
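For reference, the computation this docstring describes is the usual RoPE application; a minimal sketch using Llama's `rotate_half` (the comment above notes this model substitutes a different rotate function):

```python
import torch

def rotate_half(x):
    # Llama-style rotation; Qwen2.5-Omni's DiT uses a different rotate variant.
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
```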
@@ -3131,9 +3084,9 @@ class DiTInputEmbedding(nn.Module):
         speaker_embedding: torch.Tensor,
         condition_vector: torch.Tensor,
         code_embed: torch.Tensor,
-        drop_audio_cond: bool
-        code_embed_uncond: bool
-        apply_cfg: bool
+        drop_audio_cond: Optional[bool] = False,
+        code_embed_uncond: Optional[bool] = None,
+        apply_cfg: Optional[bool] = True,
     ):
         if apply_cfg:
             hidden_states = torch.cat([hidden_states, hidden_states], dim=0)
@@ -3469,9 +3422,6 @@ class DownSample1d(nn.Module):
         super().__init__()
         cutoff = 0.5 / ratio
         half_width = 0.6 / ratio
-        self.cutoff = cutoff
-        self.half_width = half_width
-        self.kernel_size = kernel_size
 
         if cutoff < 0.0:
             raise ValueError("Minimum cutoff must be larger than zero.")
@@ -3653,8 +3603,6 @@ class Qwen2_5OmniToken2WavBigVGANModel(Qwen2_5OmniPreTrainedModel):
             config.upsample_initial_channel // (2**self.num_upsample_layers), 1, 7, 1, padding=3, bias=False
         )
 
-        self.post_init()
-
     def normalize_spectrogram(self, spectrogram, max_value, min_db):
         return torch.clamp((2 * max_value) * ((spectrogram - min_db) / (-min_db)) - max_value, -max_value, max_value)
 
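A quick numeric check of `normalize_spectrogram` as called here (`max_value=1`, `min_db=-115`): it maps dB values linearly from [-115, 0] onto [-1, 1] and clamps everything else:

```python
import torch

def normalize_spectrogram(spectrogram, max_value, min_db):
    return torch.clamp((2 * max_value) * ((spectrogram - min_db) / (-min_db)) - max_value, -max_value, max_value)

db = torch.tensor([-115.0, -57.5, 0.0])  # silence, midpoint, full scale
print(normalize_spectrogram(db, 1, -115))  # tensor([-1.,  0.,  1.])
```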
@@ -3669,7 +3617,7 @@ class Qwen2_5OmniToken2WavBigVGANModel(Qwen2_5OmniPreTrainedModel):
         decibel_spectrum = self.amplitude_to_db(amplitude_spectrum, -115) - 20
         return self.normalize_spectrogram(decibel_spectrum, 1, -115)
 
-    def forward(self, mel_spectrogram
+    def forward(self, mel_spectrogram):
         processed_spectrogram = self.process_mel_spectrogram(mel_spectrogram)
         hidden_representation = self.conv_pre(processed_spectrogram)
 
@@ -3782,8 +3730,6 @@ class Qwen2_5OmniToken2WavDiTModel(Qwen2_5OmniPreTrainedModel):
         self.norm_out = Qwen2_5_OmniAdaLayerNormZero_Final(config.hidden_size)  # final modulation
         self.proj_out = nn.Linear(config.hidden_size, config.mel_dim)
 
-        self.post_init()
-
     def _create_block_diff(self, hidden_states):
         batch, seq_len = hidden_states.shape[0], hidden_states.shape[1]
         block_indices = torch.arange(seq_len, device=hidden_states.device) // self.block_size  # [seq_length]
@@ -3804,7 +3750,6 @@ class Qwen2_5OmniToken2WavDiTModel(Qwen2_5OmniPreTrainedModel):
         drop_audio_conditioning=False,
         drop_code=False,
         apply_cfg=True,
-        **kwargs,
     ):
         batch_size = hidden_states.shape[0]
         if time_step.ndim == 0:
@@ -3855,24 +3800,15 @@ class Qwen2_5OmniToken2WavDiTModel(Qwen2_5OmniPreTrainedModel):
         guidance_scale=0.5,
         sway_coefficient=-1.0,
     ):
+        noise_initialization = torch.randn([1, 30000, self.mel_dim], dtype=reference_mel_spectrogram.dtype)
         maximum_duration = quantized_code.shape[1] * self.repeats
+        initial_state = noise_initialization[:, :maximum_duration].to(quantized_code.device)
         batch_size = reference_mel_spectrogram.shape[0]
+        conditioning_vector = conditioning_vector.unsqueeze(1).repeat(1, maximum_duration, 1)
+
         if batch_size != 1:
             raise ValueError("Only batch size = 1 is currently supported")
 
-        if maximum_duration > self.config.max_position_embeddings:
-            raise ValueError(
-                f"Requested mel length ({maximum_duration}) exceeds `dit_config.max_position_embeddings` "
-                f"({self.config.max_position_embeddings}). Provide shorter `quantized_code`."
-            )
-
-        initial_state = torch.randn(
-            [batch_size, maximum_duration, self.mel_dim],
-            dtype=reference_mel_spectrogram.dtype,
-            device=quantized_code.device,
-        )
-        conditioning_vector = conditioning_vector.unsqueeze(1).repeat(1, maximum_duration, 1)
-
         def ode_function(time_step, hidden_states):
             if guidance_scale < 1e-5:
                 prediction = self(
@@ -3883,7 +3819,6 @@ class Qwen2_5OmniToken2WavDiTModel(Qwen2_5OmniPreTrainedModel):
                     time_step=time_step,
                     drop_audio_conditioning=False,
                     drop_code=False,
-                    apply_cfg=False,
                 )
                 return prediction
 
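Context for the `guidance_scale < 1e-5` branch above: when guidance is effectively off, a single conditional forward suffices; otherwise the DiT runs conditional and unconditional predictions (batched together via `apply_cfg`) and blends them. A hedged sketch of the standard classifier-free guidance blend, not the model's exact code path:

```python
import torch

def cfg_combine(pred_cond: torch.Tensor, pred_uncond: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # Push the output away from the unconditional prediction, toward the conditional one.
    return pred_uncond + guidance_scale * (pred_cond - pred_uncond)

# With guidance_scale=0.5 the result sits halfway between the two predictions.
cond, uncond = torch.tensor([2.0]), torch.tensor([0.0])
print(cfg_combine(cond, uncond, 0.5))  # tensor([1.])
```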
@@ -3928,9 +3863,9 @@ class Qwen2_5OmniToken2WavModel(Qwen2_5OmniPreTrainedModel):
     def __init__(self, config: Qwen2_5OmniToken2WavConfig):
         super().__init__(config)
         attn_impl = config._attn_implementation
-        if
+        if config._attn_implementation == "flash_attention_2":
             logger.warning_once(
-                "Qwen2_5OmniToken2WavModel must inference with fp32, but
+                "Qwen2_5OmniToken2WavModel must inference with fp32, but flash_attention_2 only supports fp16 and bf16, "
                 "attention implementation of Qwen2_5OmniToken2WavModel will fallback to sdpa."
             )
             attn_impl = "sdpa"
@@ -3946,8 +3881,6 @@ class Qwen2_5OmniToken2WavModel(Qwen2_5OmniPreTrainedModel):
             config.bigvgan_config, attn_implementation=attn_impl
         )
 
-        self.post_init()
-
     def forward(
         self,
         code,
@@ -4080,7 +4013,7 @@ class Qwen2_5OmniForConditionalGeneration(Qwen2_5OmniPreTrainedModel, Generation
     # TODO: raushan, defaults should be saved in generation config
     def generate(
         self,
-        input_ids: torch.Tensor
+        input_ids: Optional[torch.Tensor] = None,
         speaker: str = "Chelsie",
         use_audio_in_video: bool = False,
         thinker_max_new_tokens: int = 1024,
@@ -4116,7 +4049,7 @@ class Qwen2_5OmniForConditionalGeneration(Qwen2_5OmniPreTrainedModel, Generation
         - **Text** (`torch.Tensor`): Generated text token sequence.
         - **Audio waveform** (`torch.Tensor`): Generated audio waveform.
         """
-        # check `False` on purpose because the
+        # check `False` on purpose because the parameter can be `str/bool`. This is needed for BC
         generation_mode = kwargs.pop("generation_mode", None)
         return_audio = generation_mode != "text" and generation_mode is not False
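The backward-compatibility check above resolves `return_audio` as follows (derived directly from the line shown):

```python
for generation_mode in (None, "text", False, "audio"):
    return_audio = generation_mode != "text" and generation_mode is not False
    print(generation_mode, return_audio)
# None True; "text" False; False False; "audio" True
```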