transformers-5.0.0rc3-py3-none-any.whl → transformers-5.1.0-py3-none-any.whl
This diff compares the contents of two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
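For reference, the release this diff targets can be installed and verified with the standard pip workflow. The snippet below is a minimal sketch, not part of the published diff; it assumes a pip-managed environment where `transformers` has not yet been imported.

```python
# Minimal sketch: pin the exact release this diff targets, then confirm
# that the environment actually resolves to it. Not part of the diff itself.
import subprocess
import sys

subprocess.check_call([sys.executable, "-m", "pip", "install", "transformers==5.1.0"])

import transformers

assert transformers.__version__ == "5.1.0", transformers.__version__
print(f"transformers {transformers.__version__} installed")
```

The per-file changes between the two versions follow.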
- transformers/__init__.py +4 -11
- transformers/activations.py +2 -2
- transformers/backbone_utils.py +326 -0
- transformers/cache_utils.py +11 -2
- transformers/cli/serve.py +11 -8
- transformers/configuration_utils.py +1 -69
- transformers/conversion_mapping.py +146 -26
- transformers/convert_slow_tokenizer.py +6 -4
- transformers/core_model_loading.py +207 -118
- transformers/dependency_versions_check.py +0 -1
- transformers/dependency_versions_table.py +7 -8
- transformers/file_utils.py +0 -2
- transformers/generation/candidate_generator.py +1 -2
- transformers/generation/continuous_batching/cache.py +40 -38
- transformers/generation/continuous_batching/cache_manager.py +3 -16
- transformers/generation/continuous_batching/continuous_api.py +94 -406
- transformers/generation/continuous_batching/input_ouputs.py +464 -0
- transformers/generation/continuous_batching/requests.py +54 -17
- transformers/generation/continuous_batching/scheduler.py +77 -95
- transformers/generation/logits_process.py +10 -5
- transformers/generation/stopping_criteria.py +1 -2
- transformers/generation/utils.py +75 -95
- transformers/image_processing_utils.py +0 -3
- transformers/image_processing_utils_fast.py +17 -18
- transformers/image_transforms.py +44 -13
- transformers/image_utils.py +0 -5
- transformers/initialization.py +57 -0
- transformers/integrations/__init__.py +10 -24
- transformers/integrations/accelerate.py +47 -11
- transformers/integrations/deepspeed.py +145 -3
- transformers/integrations/executorch.py +2 -6
- transformers/integrations/finegrained_fp8.py +142 -7
- transformers/integrations/flash_attention.py +2 -7
- transformers/integrations/hub_kernels.py +18 -7
- transformers/integrations/moe.py +226 -106
- transformers/integrations/mxfp4.py +47 -34
- transformers/integrations/peft.py +488 -176
- transformers/integrations/tensor_parallel.py +641 -581
- transformers/masking_utils.py +153 -9
- transformers/modeling_flash_attention_utils.py +1 -2
- transformers/modeling_utils.py +359 -358
- transformers/models/__init__.py +6 -0
- transformers/models/afmoe/configuration_afmoe.py +14 -4
- transformers/models/afmoe/modeling_afmoe.py +8 -8
- transformers/models/afmoe/modular_afmoe.py +7 -7
- transformers/models/aimv2/configuration_aimv2.py +2 -7
- transformers/models/aimv2/modeling_aimv2.py +26 -24
- transformers/models/aimv2/modular_aimv2.py +8 -12
- transformers/models/albert/configuration_albert.py +8 -1
- transformers/models/albert/modeling_albert.py +3 -3
- transformers/models/align/configuration_align.py +8 -5
- transformers/models/align/modeling_align.py +22 -24
- transformers/models/altclip/configuration_altclip.py +4 -6
- transformers/models/altclip/modeling_altclip.py +30 -26
- transformers/models/apertus/configuration_apertus.py +5 -7
- transformers/models/apertus/modeling_apertus.py +4 -4
- transformers/models/apertus/modular_apertus.py +8 -10
- transformers/models/arcee/configuration_arcee.py +5 -7
- transformers/models/arcee/modeling_arcee.py +4 -4
- transformers/models/aria/configuration_aria.py +11 -21
- transformers/models/aria/modeling_aria.py +39 -36
- transformers/models/aria/modular_aria.py +33 -39
- transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +3 -3
- transformers/models/audioflamingo3/modeling_audioflamingo3.py +39 -30
- transformers/models/audioflamingo3/modular_audioflamingo3.py +41 -27
- transformers/models/auto/auto_factory.py +8 -6
- transformers/models/auto/configuration_auto.py +22 -0
- transformers/models/auto/image_processing_auto.py +17 -13
- transformers/models/auto/modeling_auto.py +15 -0
- transformers/models/auto/processing_auto.py +9 -18
- transformers/models/auto/tokenization_auto.py +17 -15
- transformers/models/autoformer/modeling_autoformer.py +2 -1
- transformers/models/aya_vision/configuration_aya_vision.py +4 -0
- transformers/models/aya_vision/modeling_aya_vision.py +29 -62
- transformers/models/aya_vision/modular_aya_vision.py +20 -45
- transformers/models/bamba/configuration_bamba.py +17 -7
- transformers/models/bamba/modeling_bamba.py +23 -55
- transformers/models/bamba/modular_bamba.py +19 -54
- transformers/models/bark/configuration_bark.py +2 -1
- transformers/models/bark/modeling_bark.py +24 -10
- transformers/models/bart/configuration_bart.py +9 -4
- transformers/models/bart/modeling_bart.py +9 -12
- transformers/models/beit/configuration_beit.py +2 -4
- transformers/models/beit/image_processing_beit_fast.py +3 -3
- transformers/models/beit/modeling_beit.py +14 -9
- transformers/models/bert/configuration_bert.py +12 -1
- transformers/models/bert/modeling_bert.py +6 -30
- transformers/models/bert_generation/configuration_bert_generation.py +17 -1
- transformers/models/bert_generation/modeling_bert_generation.py +6 -6
- transformers/models/big_bird/configuration_big_bird.py +12 -8
- transformers/models/big_bird/modeling_big_bird.py +0 -15
- transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +9 -8
- transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +9 -7
- transformers/models/biogpt/configuration_biogpt.py +8 -1
- transformers/models/biogpt/modeling_biogpt.py +4 -8
- transformers/models/biogpt/modular_biogpt.py +1 -5
- transformers/models/bit/configuration_bit.py +2 -4
- transformers/models/bit/modeling_bit.py +6 -5
- transformers/models/bitnet/configuration_bitnet.py +5 -7
- transformers/models/bitnet/modeling_bitnet.py +3 -4
- transformers/models/bitnet/modular_bitnet.py +3 -4
- transformers/models/blenderbot/configuration_blenderbot.py +8 -4
- transformers/models/blenderbot/modeling_blenderbot.py +4 -4
- transformers/models/blenderbot_small/configuration_blenderbot_small.py +8 -4
- transformers/models/blenderbot_small/modeling_blenderbot_small.py +4 -4
- transformers/models/blip/configuration_blip.py +9 -9
- transformers/models/blip/modeling_blip.py +55 -37
- transformers/models/blip_2/configuration_blip_2.py +2 -1
- transformers/models/blip_2/modeling_blip_2.py +81 -56
- transformers/models/bloom/configuration_bloom.py +5 -1
- transformers/models/bloom/modeling_bloom.py +2 -1
- transformers/models/blt/configuration_blt.py +23 -12
- transformers/models/blt/modeling_blt.py +20 -14
- transformers/models/blt/modular_blt.py +70 -10
- transformers/models/bridgetower/configuration_bridgetower.py +7 -1
- transformers/models/bridgetower/image_processing_bridgetower_fast.py +6 -6
- transformers/models/bridgetower/modeling_bridgetower.py +29 -15
- transformers/models/bros/configuration_bros.py +24 -17
- transformers/models/camembert/configuration_camembert.py +8 -1
- transformers/models/camembert/modeling_camembert.py +6 -6
- transformers/models/canine/configuration_canine.py +4 -1
- transformers/models/chameleon/configuration_chameleon.py +5 -7
- transformers/models/chameleon/image_processing_chameleon_fast.py +5 -5
- transformers/models/chameleon/modeling_chameleon.py +82 -36
- transformers/models/chinese_clip/configuration_chinese_clip.py +10 -7
- transformers/models/chinese_clip/modeling_chinese_clip.py +28 -29
- transformers/models/clap/configuration_clap.py +4 -8
- transformers/models/clap/modeling_clap.py +21 -22
- transformers/models/clip/configuration_clip.py +4 -1
- transformers/models/clip/image_processing_clip_fast.py +9 -0
- transformers/models/clip/modeling_clip.py +25 -22
- transformers/models/clipseg/configuration_clipseg.py +4 -1
- transformers/models/clipseg/modeling_clipseg.py +27 -25
- transformers/models/clipseg/processing_clipseg.py +11 -3
- transformers/models/clvp/configuration_clvp.py +14 -2
- transformers/models/clvp/modeling_clvp.py +19 -30
- transformers/models/codegen/configuration_codegen.py +4 -3
- transformers/models/codegen/modeling_codegen.py +2 -1
- transformers/models/cohere/configuration_cohere.py +5 -7
- transformers/models/cohere/modeling_cohere.py +4 -4
- transformers/models/cohere/modular_cohere.py +3 -3
- transformers/models/cohere2/configuration_cohere2.py +6 -8
- transformers/models/cohere2/modeling_cohere2.py +4 -4
- transformers/models/cohere2/modular_cohere2.py +9 -11
- transformers/models/cohere2_vision/configuration_cohere2_vision.py +5 -1
- transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +3 -3
- transformers/models/cohere2_vision/modeling_cohere2_vision.py +24 -25
- transformers/models/cohere2_vision/modular_cohere2_vision.py +20 -20
- transformers/models/colqwen2/modeling_colqwen2.py +7 -6
- transformers/models/colqwen2/modular_colqwen2.py +7 -6
- transformers/models/conditional_detr/configuration_conditional_detr.py +19 -46
- transformers/models/conditional_detr/image_processing_conditional_detr.py +3 -4
- transformers/models/conditional_detr/image_processing_conditional_detr_fast.py +28 -14
- transformers/models/conditional_detr/modeling_conditional_detr.py +794 -942
- transformers/models/conditional_detr/modular_conditional_detr.py +901 -3
- transformers/models/convbert/configuration_convbert.py +11 -7
- transformers/models/convnext/configuration_convnext.py +2 -4
- transformers/models/convnext/image_processing_convnext_fast.py +2 -2
- transformers/models/convnext/modeling_convnext.py +7 -6
- transformers/models/convnextv2/configuration_convnextv2.py +2 -4
- transformers/models/convnextv2/modeling_convnextv2.py +7 -6
- transformers/models/cpmant/configuration_cpmant.py +4 -0
- transformers/models/csm/configuration_csm.py +9 -15
- transformers/models/csm/modeling_csm.py +3 -3
- transformers/models/ctrl/configuration_ctrl.py +16 -0
- transformers/models/ctrl/modeling_ctrl.py +13 -25
- transformers/models/cwm/configuration_cwm.py +5 -7
- transformers/models/cwm/modeling_cwm.py +4 -4
- transformers/models/d_fine/configuration_d_fine.py +10 -56
- transformers/models/d_fine/modeling_d_fine.py +728 -868
- transformers/models/d_fine/modular_d_fine.py +335 -412
- transformers/models/dab_detr/configuration_dab_detr.py +22 -48
- transformers/models/dab_detr/modeling_dab_detr.py +11 -7
- transformers/models/dac/modeling_dac.py +1 -1
- transformers/models/data2vec/configuration_data2vec_audio.py +4 -1
- transformers/models/data2vec/configuration_data2vec_text.py +11 -2
- transformers/models/data2vec/modeling_data2vec_audio.py +3 -3
- transformers/models/data2vec/modeling_data2vec_text.py +6 -6
- transformers/models/data2vec/modeling_data2vec_vision.py +4 -2
- transformers/models/dbrx/configuration_dbrx.py +11 -3
- transformers/models/dbrx/modeling_dbrx.py +6 -6
- transformers/models/dbrx/modular_dbrx.py +6 -6
- transformers/models/deberta/configuration_deberta.py +6 -0
- transformers/models/deberta_v2/configuration_deberta_v2.py +6 -0
- transformers/models/decision_transformer/configuration_decision_transformer.py +3 -1
- transformers/models/decision_transformer/modeling_decision_transformer.py +3 -3
- transformers/models/deepseek_v2/configuration_deepseek_v2.py +7 -10
- transformers/models/deepseek_v2/modeling_deepseek_v2.py +7 -8
- transformers/models/deepseek_v2/modular_deepseek_v2.py +8 -10
- transformers/models/deepseek_v3/configuration_deepseek_v3.py +7 -10
- transformers/models/deepseek_v3/modeling_deepseek_v3.py +7 -7
- transformers/models/deepseek_v3/modular_deepseek_v3.py +6 -5
- transformers/models/deepseek_vl/configuration_deepseek_vl.py +4 -0
- transformers/models/deepseek_vl/image_processing_deepseek_vl.py +2 -2
- transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py +5 -5
- transformers/models/deepseek_vl/modeling_deepseek_vl.py +17 -12
- transformers/models/deepseek_vl/modular_deepseek_vl.py +4 -0
- transformers/models/deepseek_vl_hybrid/configuration_deepseek_vl_hybrid.py +4 -0
- transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py +2 -2
- transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py +6 -6
- transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py +68 -24
- transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py +70 -19
- transformers/models/deformable_detr/configuration_deformable_detr.py +22 -45
- transformers/models/deformable_detr/image_processing_deformable_detr_fast.py +25 -11
- transformers/models/deformable_detr/modeling_deformable_detr.py +410 -607
- transformers/models/deformable_detr/modular_deformable_detr.py +1385 -3
- transformers/models/deit/modeling_deit.py +11 -7
- transformers/models/depth_anything/configuration_depth_anything.py +12 -42
- transformers/models/depth_anything/modeling_depth_anything.py +5 -3
- transformers/models/depth_pro/image_processing_depth_pro_fast.py +2 -2
- transformers/models/depth_pro/modeling_depth_pro.py +8 -4
- transformers/models/detr/configuration_detr.py +18 -49
- transformers/models/detr/image_processing_detr_fast.py +11 -11
- transformers/models/detr/modeling_detr.py +695 -734
- transformers/models/dia/configuration_dia.py +4 -7
- transformers/models/dia/generation_dia.py +8 -17
- transformers/models/dia/modeling_dia.py +7 -7
- transformers/models/dia/modular_dia.py +4 -4
- transformers/models/diffllama/configuration_diffllama.py +5 -7
- transformers/models/diffllama/modeling_diffllama.py +3 -8
- transformers/models/diffllama/modular_diffllama.py +2 -7
- transformers/models/dinat/configuration_dinat.py +2 -4
- transformers/models/dinat/modeling_dinat.py +7 -6
- transformers/models/dinov2/configuration_dinov2.py +2 -4
- transformers/models/dinov2/modeling_dinov2.py +9 -8
- transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py +2 -4
- transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py +9 -8
- transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py +6 -7
- transformers/models/dinov3_convnext/configuration_dinov3_convnext.py +2 -4
- transformers/models/dinov3_convnext/modeling_dinov3_convnext.py +2 -3
- transformers/models/dinov3_vit/configuration_dinov3_vit.py +2 -4
- transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py +2 -2
- transformers/models/dinov3_vit/modeling_dinov3_vit.py +5 -6
- transformers/models/dinov3_vit/modular_dinov3_vit.py +5 -6
- transformers/models/distilbert/configuration_distilbert.py +8 -1
- transformers/models/distilbert/modeling_distilbert.py +3 -3
- transformers/models/doge/configuration_doge.py +17 -7
- transformers/models/doge/modeling_doge.py +4 -4
- transformers/models/doge/modular_doge.py +20 -10
- transformers/models/donut/image_processing_donut_fast.py +4 -4
- transformers/models/dots1/configuration_dots1.py +16 -7
- transformers/models/dots1/modeling_dots1.py +4 -4
- transformers/models/dpr/configuration_dpr.py +19 -1
- transformers/models/dpt/configuration_dpt.py +23 -65
- transformers/models/dpt/image_processing_dpt_fast.py +5 -5
- transformers/models/dpt/modeling_dpt.py +19 -15
- transformers/models/dpt/modular_dpt.py +4 -4
- transformers/models/edgetam/configuration_edgetam.py +1 -1
- transformers/models/edgetam/modeling_edgetam.py +53 -53
- transformers/models/edgetam/modular_edgetam.py +5 -7
- transformers/models/edgetam_video/modeling_edgetam_video.py +55 -56
- transformers/models/edgetam_video/modular_edgetam_video.py +9 -9
- transformers/models/efficientloftr/image_processing_efficientloftr_fast.py +4 -3
- transformers/models/efficientloftr/modeling_efficientloftr.py +19 -9
- transformers/models/efficientnet/image_processing_efficientnet_fast.py +2 -2
- transformers/models/electra/configuration_electra.py +13 -2
- transformers/models/electra/modeling_electra.py +6 -6
- transformers/models/emu3/configuration_emu3.py +12 -10
- transformers/models/emu3/modeling_emu3.py +84 -47
- transformers/models/emu3/modular_emu3.py +77 -39
- transformers/models/encoder_decoder/configuration_encoder_decoder.py +12 -1
- transformers/models/encoder_decoder/modeling_encoder_decoder.py +20 -24
- transformers/models/eomt/configuration_eomt.py +12 -13
- transformers/models/eomt/image_processing_eomt_fast.py +3 -3
- transformers/models/eomt/modeling_eomt.py +3 -3
- transformers/models/eomt/modular_eomt.py +17 -17
- transformers/models/eomt_dinov3/__init__.py +28 -0
- transformers/models/eomt_dinov3/configuration_eomt_dinov3.py +204 -0
- transformers/models/eomt_dinov3/modeling_eomt_dinov3.py +1376 -0
- transformers/models/eomt_dinov3/modular_eomt_dinov3.py +454 -0
- transformers/models/ernie/configuration_ernie.py +24 -2
- transformers/models/ernie/modeling_ernie.py +6 -30
- transformers/models/ernie4_5/configuration_ernie4_5.py +5 -7
- transformers/models/ernie4_5/modeling_ernie4_5.py +4 -4
- transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py +7 -10
- transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +4 -4
- transformers/models/ernie4_5_vl_moe/configuration_ernie4_5_vl_moe.py +17 -6
- transformers/models/ernie4_5_vl_moe/modeling_ernie4_5_vl_moe.py +229 -188
- transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py +79 -55
- transformers/models/esm/configuration_esm.py +9 -11
- transformers/models/esm/modeling_esm.py +3 -3
- transformers/models/esm/modeling_esmfold.py +1 -6
- transformers/models/esm/openfold_utils/protein.py +2 -3
- transformers/models/evolla/configuration_evolla.py +21 -8
- transformers/models/evolla/modeling_evolla.py +11 -7
- transformers/models/evolla/modular_evolla.py +5 -1
- transformers/models/exaone4/configuration_exaone4.py +8 -5
- transformers/models/exaone4/modeling_exaone4.py +4 -4
- transformers/models/exaone4/modular_exaone4.py +11 -8
- transformers/models/exaone_moe/__init__.py +27 -0
- transformers/models/exaone_moe/configuration_exaone_moe.py +235 -0
- transformers/models/exaone_moe/modeling_exaone_moe.py +665 -0
- transformers/models/exaone_moe/modular_exaone_moe.py +373 -0
- transformers/models/falcon/configuration_falcon.py +9 -1
- transformers/models/falcon/modeling_falcon.py +3 -8
- transformers/models/falcon_h1/configuration_falcon_h1.py +17 -8
- transformers/models/falcon_h1/modeling_falcon_h1.py +22 -54
- transformers/models/falcon_h1/modular_falcon_h1.py +21 -52
- transformers/models/falcon_mamba/configuration_falcon_mamba.py +5 -1
- transformers/models/falcon_mamba/modeling_falcon_mamba.py +18 -26
- transformers/models/falcon_mamba/modular_falcon_mamba.py +4 -0
- transformers/models/fast_vlm/configuration_fast_vlm.py +10 -1
- transformers/models/fast_vlm/modeling_fast_vlm.py +37 -64
- transformers/models/fast_vlm/modular_fast_vlm.py +146 -35
- transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +0 -1
- transformers/models/flaubert/configuration_flaubert.py +10 -4
- transformers/models/flaubert/modeling_flaubert.py +1 -1
- transformers/models/flava/configuration_flava.py +4 -3
- transformers/models/flava/image_processing_flava_fast.py +4 -4
- transformers/models/flava/modeling_flava.py +36 -28
- transformers/models/flex_olmo/configuration_flex_olmo.py +11 -14
- transformers/models/flex_olmo/modeling_flex_olmo.py +4 -4
- transformers/models/flex_olmo/modular_flex_olmo.py +11 -14
- transformers/models/florence2/configuration_florence2.py +4 -0
- transformers/models/florence2/modeling_florence2.py +57 -32
- transformers/models/florence2/modular_florence2.py +48 -26
- transformers/models/fnet/configuration_fnet.py +6 -1
- transformers/models/focalnet/configuration_focalnet.py +2 -4
- transformers/models/focalnet/modeling_focalnet.py +10 -7
- transformers/models/fsmt/configuration_fsmt.py +12 -16
- transformers/models/funnel/configuration_funnel.py +8 -0
- transformers/models/fuyu/configuration_fuyu.py +5 -8
- transformers/models/fuyu/image_processing_fuyu_fast.py +5 -4
- transformers/models/fuyu/modeling_fuyu.py +24 -23
- transformers/models/gemma/configuration_gemma.py +5 -7
- transformers/models/gemma/modeling_gemma.py +4 -4
- transformers/models/gemma/modular_gemma.py +5 -7
- transformers/models/gemma2/configuration_gemma2.py +5 -7
- transformers/models/gemma2/modeling_gemma2.py +4 -4
- transformers/models/gemma2/modular_gemma2.py +8 -10
- transformers/models/gemma3/configuration_gemma3.py +28 -22
- transformers/models/gemma3/image_processing_gemma3_fast.py +2 -2
- transformers/models/gemma3/modeling_gemma3.py +37 -33
- transformers/models/gemma3/modular_gemma3.py +46 -42
- transformers/models/gemma3n/configuration_gemma3n.py +35 -22
- transformers/models/gemma3n/modeling_gemma3n.py +86 -58
- transformers/models/gemma3n/modular_gemma3n.py +112 -75
- transformers/models/git/configuration_git.py +5 -7
- transformers/models/git/modeling_git.py +31 -41
- transformers/models/glm/configuration_glm.py +7 -9
- transformers/models/glm/modeling_glm.py +4 -4
- transformers/models/glm4/configuration_glm4.py +7 -9
- transformers/models/glm4/modeling_glm4.py +4 -4
- transformers/models/glm46v/configuration_glm46v.py +4 -0
- transformers/models/glm46v/image_processing_glm46v.py +5 -2
- transformers/models/glm46v/image_processing_glm46v_fast.py +2 -2
- transformers/models/glm46v/modeling_glm46v.py +91 -46
- transformers/models/glm46v/modular_glm46v.py +4 -0
- transformers/models/glm4_moe/configuration_glm4_moe.py +17 -7
- transformers/models/glm4_moe/modeling_glm4_moe.py +4 -4
- transformers/models/glm4_moe/modular_glm4_moe.py +17 -7
- transformers/models/glm4_moe_lite/configuration_glm4_moe_lite.py +8 -10
- transformers/models/glm4_moe_lite/modeling_glm4_moe_lite.py +7 -7
- transformers/models/glm4_moe_lite/modular_glm4_moe_lite.py +8 -10
- transformers/models/glm4v/configuration_glm4v.py +12 -8
- transformers/models/glm4v/image_processing_glm4v.py +5 -2
- transformers/models/glm4v/image_processing_glm4v_fast.py +2 -2
- transformers/models/glm4v/modeling_glm4v.py +120 -63
- transformers/models/glm4v/modular_glm4v.py +82 -50
- transformers/models/glm4v_moe/configuration_glm4v_moe.py +18 -6
- transformers/models/glm4v_moe/modeling_glm4v_moe.py +115 -63
- transformers/models/glm4v_moe/modular_glm4v_moe.py +23 -12
- transformers/models/glm_image/configuration_glm_image.py +26 -20
- transformers/models/glm_image/image_processing_glm_image.py +1 -1
- transformers/models/glm_image/image_processing_glm_image_fast.py +5 -7
- transformers/models/glm_image/modeling_glm_image.py +337 -236
- transformers/models/glm_image/modular_glm_image.py +415 -255
- transformers/models/glm_image/processing_glm_image.py +65 -17
- transformers/{pipelines/deprecated → models/glm_ocr}/__init__.py +15 -2
- transformers/models/glm_ocr/configuration_glm_ocr.py +312 -0
- transformers/models/glm_ocr/modeling_glm_ocr.py +1633 -0
- transformers/models/glm_ocr/modular_glm_ocr.py +428 -0
- transformers/models/glmasr/modeling_glmasr.py +34 -28
- transformers/models/glmasr/modular_glmasr.py +23 -11
- transformers/models/glpn/image_processing_glpn_fast.py +3 -3
- transformers/models/glpn/modeling_glpn.py +4 -2
- transformers/models/got_ocr2/configuration_got_ocr2.py +6 -6
- transformers/models/got_ocr2/image_processing_got_ocr2_fast.py +3 -3
- transformers/models/got_ocr2/modeling_got_ocr2.py +31 -37
- transformers/models/got_ocr2/modular_got_ocr2.py +30 -19
- transformers/models/gpt2/configuration_gpt2.py +13 -1
- transformers/models/gpt2/modeling_gpt2.py +5 -5
- transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +7 -1
- transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +5 -4
- transformers/models/gpt_neo/configuration_gpt_neo.py +9 -1
- transformers/models/gpt_neo/modeling_gpt_neo.py +3 -7
- transformers/models/gpt_neox/configuration_gpt_neox.py +8 -3
- transformers/models/gpt_neox/modeling_gpt_neox.py +4 -4
- transformers/models/gpt_neox/modular_gpt_neox.py +4 -4
- transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +9 -1
- transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +2 -2
- transformers/models/gpt_oss/configuration_gpt_oss.py +10 -6
- transformers/models/gpt_oss/modeling_gpt_oss.py +46 -79
- transformers/models/gpt_oss/modular_gpt_oss.py +45 -78
- transformers/models/gptj/configuration_gptj.py +4 -4
- transformers/models/gptj/modeling_gptj.py +3 -7
- transformers/models/granite/configuration_granite.py +5 -7
- transformers/models/granite/modeling_granite.py +4 -4
- transformers/models/granite_speech/modeling_granite_speech.py +63 -37
- transformers/models/granitemoe/configuration_granitemoe.py +5 -7
- transformers/models/granitemoe/modeling_granitemoe.py +4 -4
- transformers/models/granitemoehybrid/configuration_granitemoehybrid.py +17 -7
- transformers/models/granitemoehybrid/modeling_granitemoehybrid.py +22 -54
- transformers/models/granitemoehybrid/modular_granitemoehybrid.py +39 -45
- transformers/models/granitemoeshared/configuration_granitemoeshared.py +6 -7
- transformers/models/granitemoeshared/modeling_granitemoeshared.py +4 -4
- transformers/models/grounding_dino/configuration_grounding_dino.py +10 -45
- transformers/models/grounding_dino/image_processing_grounding_dino_fast.py +11 -11
- transformers/models/grounding_dino/modeling_grounding_dino.py +68 -86
- transformers/models/groupvit/configuration_groupvit.py +4 -1
- transformers/models/groupvit/modeling_groupvit.py +29 -22
- transformers/models/helium/configuration_helium.py +5 -7
- transformers/models/helium/modeling_helium.py +4 -4
- transformers/models/hgnet_v2/configuration_hgnet_v2.py +2 -4
- transformers/models/hgnet_v2/modeling_hgnet_v2.py +6 -5
- transformers/models/hgnet_v2/modular_hgnet_v2.py +7 -8
- transformers/models/hiera/configuration_hiera.py +2 -4
- transformers/models/hiera/modeling_hiera.py +11 -8
- transformers/models/hubert/configuration_hubert.py +4 -1
- transformers/models/hubert/modeling_hubert.py +7 -4
- transformers/models/hunyuan_v1_dense/configuration_hunyuan_v1_dense.py +5 -7
- transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py +28 -4
- transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py +28 -6
- transformers/models/hunyuan_v1_moe/configuration_hunyuan_v1_moe.py +6 -8
- transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py +22 -9
- transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py +22 -8
- transformers/models/ibert/configuration_ibert.py +4 -1
- transformers/models/idefics/configuration_idefics.py +5 -7
- transformers/models/idefics/modeling_idefics.py +3 -4
- transformers/models/idefics/vision.py +5 -4
- transformers/models/idefics2/configuration_idefics2.py +1 -2
- transformers/models/idefics2/image_processing_idefics2_fast.py +1 -0
- transformers/models/idefics2/modeling_idefics2.py +72 -50
- transformers/models/idefics3/configuration_idefics3.py +1 -3
- transformers/models/idefics3/image_processing_idefics3_fast.py +29 -3
- transformers/models/idefics3/modeling_idefics3.py +63 -40
- transformers/models/ijepa/modeling_ijepa.py +3 -3
- transformers/models/imagegpt/configuration_imagegpt.py +9 -1
- transformers/models/imagegpt/image_processing_imagegpt_fast.py +2 -2
- transformers/models/imagegpt/modeling_imagegpt.py +8 -4
- transformers/models/informer/modeling_informer.py +3 -3
- transformers/models/instructblip/configuration_instructblip.py +2 -1
- transformers/models/instructblip/modeling_instructblip.py +65 -39
- transformers/models/instructblipvideo/configuration_instructblipvideo.py +2 -1
- transformers/models/instructblipvideo/modeling_instructblipvideo.py +60 -57
- transformers/models/instructblipvideo/modular_instructblipvideo.py +43 -32
- transformers/models/instructblipvideo/video_processing_instructblipvideo.py +2 -2
- transformers/models/internvl/configuration_internvl.py +5 -0
- transformers/models/internvl/modeling_internvl.py +35 -55
- transformers/models/internvl/modular_internvl.py +26 -38
- transformers/models/internvl/video_processing_internvl.py +2 -2
- transformers/models/jais2/configuration_jais2.py +5 -7
- transformers/models/jais2/modeling_jais2.py +4 -4
- transformers/models/jamba/configuration_jamba.py +5 -7
- transformers/models/jamba/modeling_jamba.py +4 -4
- transformers/models/jamba/modular_jamba.py +3 -3
- transformers/models/janus/image_processing_janus.py +2 -2
- transformers/models/janus/image_processing_janus_fast.py +8 -8
- transformers/models/janus/modeling_janus.py +63 -146
- transformers/models/janus/modular_janus.py +62 -20
- transformers/models/jetmoe/configuration_jetmoe.py +6 -4
- transformers/models/jetmoe/modeling_jetmoe.py +3 -3
- transformers/models/jetmoe/modular_jetmoe.py +3 -3
- transformers/models/kosmos2/configuration_kosmos2.py +10 -8
- transformers/models/kosmos2/modeling_kosmos2.py +56 -34
- transformers/models/kosmos2_5/configuration_kosmos2_5.py +8 -8
- transformers/models/kosmos2_5/modeling_kosmos2_5.py +54 -63
- transformers/models/kyutai_speech_to_text/configuration_kyutai_speech_to_text.py +8 -3
- transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +44 -40
- transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py +1 -1
- transformers/models/lasr/configuration_lasr.py +2 -4
- transformers/models/lasr/modeling_lasr.py +3 -3
- transformers/models/lasr/modular_lasr.py +3 -3
- transformers/models/layoutlm/configuration_layoutlm.py +14 -1
- transformers/models/layoutlm/modeling_layoutlm.py +3 -3
- transformers/models/layoutlmv2/configuration_layoutlmv2.py +14 -16
- transformers/models/layoutlmv2/image_processing_layoutlmv2_fast.py +2 -2
- transformers/models/layoutlmv3/configuration_layoutlmv3.py +16 -18
- transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py +2 -2
- transformers/models/layoutxlm/configuration_layoutxlm.py +14 -16
- transformers/models/led/configuration_led.py +7 -8
- transformers/models/levit/image_processing_levit_fast.py +4 -4
- transformers/models/lfm2/configuration_lfm2.py +5 -7
- transformers/models/lfm2/modeling_lfm2.py +4 -4
- transformers/models/lfm2/modular_lfm2.py +3 -3
- transformers/models/lfm2_moe/configuration_lfm2_moe.py +5 -7
- transformers/models/lfm2_moe/modeling_lfm2_moe.py +4 -4
- transformers/models/lfm2_vl/configuration_lfm2_vl.py +4 -0
- transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py +9 -15
- transformers/models/lfm2_vl/modeling_lfm2_vl.py +42 -28
- transformers/models/lfm2_vl/modular_lfm2_vl.py +42 -27
- transformers/models/lightglue/image_processing_lightglue_fast.py +4 -3
- transformers/models/lightglue/modeling_lightglue.py +3 -3
- transformers/models/lightglue/modular_lightglue.py +3 -3
- transformers/models/lighton_ocr/modeling_lighton_ocr.py +31 -28
- transformers/models/lighton_ocr/modular_lighton_ocr.py +19 -18
- transformers/models/lilt/configuration_lilt.py +6 -1
- transformers/models/llama/configuration_llama.py +5 -7
- transformers/models/llama/modeling_llama.py +4 -4
- transformers/models/llama4/configuration_llama4.py +67 -47
- transformers/models/llama4/image_processing_llama4_fast.py +3 -3
- transformers/models/llama4/modeling_llama4.py +46 -44
- transformers/models/llava/configuration_llava.py +10 -0
- transformers/models/llava/image_processing_llava_fast.py +3 -3
- transformers/models/llava/modeling_llava.py +38 -65
- transformers/models/llava_next/configuration_llava_next.py +2 -1
- transformers/models/llava_next/image_processing_llava_next_fast.py +6 -6
- transformers/models/llava_next/modeling_llava_next.py +61 -60
- transformers/models/llava_next_video/configuration_llava_next_video.py +10 -6
- transformers/models/llava_next_video/modeling_llava_next_video.py +115 -100
- transformers/models/llava_next_video/modular_llava_next_video.py +110 -101
- transformers/models/llava_onevision/configuration_llava_onevision.py +10 -6
- transformers/models/llava_onevision/image_processing_llava_onevision_fast.py +8 -7
- transformers/models/llava_onevision/modeling_llava_onevision.py +111 -105
- transformers/models/llava_onevision/modular_llava_onevision.py +106 -101
- transformers/models/longcat_flash/configuration_longcat_flash.py +7 -10
- transformers/models/longcat_flash/modeling_longcat_flash.py +7 -7
- transformers/models/longcat_flash/modular_longcat_flash.py +6 -5
- transformers/models/longformer/configuration_longformer.py +4 -1
- transformers/models/longt5/configuration_longt5.py +9 -6
- transformers/models/longt5/modeling_longt5.py +2 -1
- transformers/models/luke/configuration_luke.py +8 -1
- transformers/models/lw_detr/configuration_lw_detr.py +19 -31
- transformers/models/lw_detr/modeling_lw_detr.py +43 -44
- transformers/models/lw_detr/modular_lw_detr.py +36 -38
- transformers/models/lxmert/configuration_lxmert.py +16 -0
- transformers/models/m2m_100/configuration_m2m_100.py +7 -8
- transformers/models/m2m_100/modeling_m2m_100.py +3 -3
- transformers/models/mamba/configuration_mamba.py +5 -2
- transformers/models/mamba/modeling_mamba.py +18 -26
- transformers/models/mamba2/configuration_mamba2.py +5 -7
- transformers/models/mamba2/modeling_mamba2.py +22 -33
- transformers/models/marian/configuration_marian.py +10 -4
- transformers/models/marian/modeling_marian.py +4 -4
- transformers/models/markuplm/configuration_markuplm.py +4 -6
- transformers/models/markuplm/modeling_markuplm.py +3 -3
- transformers/models/mask2former/configuration_mask2former.py +12 -47
- transformers/models/mask2former/image_processing_mask2former_fast.py +8 -8
- transformers/models/mask2former/modeling_mask2former.py +18 -12
- transformers/models/maskformer/configuration_maskformer.py +14 -45
- transformers/models/maskformer/configuration_maskformer_swin.py +2 -4
- transformers/models/maskformer/image_processing_maskformer_fast.py +8 -8
- transformers/models/maskformer/modeling_maskformer.py +15 -9
- transformers/models/maskformer/modeling_maskformer_swin.py +2 -3
- transformers/models/mbart/configuration_mbart.py +9 -4
- transformers/models/mbart/modeling_mbart.py +9 -6
- transformers/models/megatron_bert/configuration_megatron_bert.py +13 -2
- transformers/models/megatron_bert/modeling_megatron_bert.py +0 -15
- transformers/models/metaclip_2/configuration_metaclip_2.py +4 -1
- transformers/models/metaclip_2/modeling_metaclip_2.py +49 -42
- transformers/models/metaclip_2/modular_metaclip_2.py +41 -25
- transformers/models/mgp_str/modeling_mgp_str.py +4 -2
- transformers/models/mimi/configuration_mimi.py +4 -0
- transformers/models/mimi/modeling_mimi.py +40 -36
- transformers/models/minimax/configuration_minimax.py +8 -11
- transformers/models/minimax/modeling_minimax.py +5 -5
- transformers/models/minimax/modular_minimax.py +9 -12
- transformers/models/minimax_m2/configuration_minimax_m2.py +8 -31
- transformers/models/minimax_m2/modeling_minimax_m2.py +4 -4
- transformers/models/minimax_m2/modular_minimax_m2.py +8 -31
- transformers/models/ministral/configuration_ministral.py +5 -7
- transformers/models/ministral/modeling_ministral.py +4 -4
- transformers/models/ministral/modular_ministral.py +5 -8
- transformers/models/ministral3/configuration_ministral3.py +4 -4
- transformers/models/ministral3/modeling_ministral3.py +4 -4
- transformers/models/ministral3/modular_ministral3.py +3 -3
- transformers/models/mistral/configuration_mistral.py +5 -7
- transformers/models/mistral/modeling_mistral.py +4 -4
- transformers/models/mistral/modular_mistral.py +3 -3
- transformers/models/mistral3/configuration_mistral3.py +4 -0
- transformers/models/mistral3/modeling_mistral3.py +36 -40
- transformers/models/mistral3/modular_mistral3.py +31 -32
- transformers/models/mixtral/configuration_mixtral.py +8 -11
- transformers/models/mixtral/modeling_mixtral.py +4 -4
- transformers/models/mlcd/modeling_mlcd.py +7 -5
- transformers/models/mlcd/modular_mlcd.py +7 -5
- transformers/models/mllama/configuration_mllama.py +5 -7
- transformers/models/mllama/image_processing_mllama_fast.py +6 -5
- transformers/models/mllama/modeling_mllama.py +19 -19
- transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py +10 -45
- transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +66 -84
- transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py +10 -45
- transformers/models/mobilebert/configuration_mobilebert.py +4 -1
- transformers/models/mobilebert/modeling_mobilebert.py +3 -3
- transformers/models/mobilenet_v2/image_processing_mobilenet_v2_fast.py +4 -4
- transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +4 -2
- transformers/models/mobilevit/image_processing_mobilevit_fast.py +4 -4
- transformers/models/mobilevit/modeling_mobilevit.py +4 -2
- transformers/models/mobilevitv2/modeling_mobilevitv2.py +4 -2
- transformers/models/modernbert/configuration_modernbert.py +46 -21
- transformers/models/modernbert/modeling_modernbert.py +146 -899
- transformers/models/modernbert/modular_modernbert.py +185 -908
- transformers/models/modernbert_decoder/configuration_modernbert_decoder.py +21 -13
- transformers/models/modernbert_decoder/modeling_modernbert_decoder.py +9 -17
- transformers/models/modernbert_decoder/modular_modernbert_decoder.py +24 -23
- transformers/models/moonshine/configuration_moonshine.py +12 -7
- transformers/models/moonshine/modeling_moonshine.py +7 -7
- transformers/models/moonshine/modular_moonshine.py +19 -13
- transformers/models/moshi/configuration_moshi.py +28 -2
- transformers/models/moshi/modeling_moshi.py +4 -9
- transformers/models/mpnet/configuration_mpnet.py +6 -1
- transformers/models/mpt/configuration_mpt.py +16 -0
- transformers/models/mra/configuration_mra.py +8 -1
- transformers/models/mt5/configuration_mt5.py +9 -5
- transformers/models/mt5/modeling_mt5.py +5 -8
- transformers/models/musicgen/configuration_musicgen.py +12 -7
- transformers/models/musicgen/modeling_musicgen.py +6 -5
- transformers/models/musicgen_melody/configuration_musicgen_melody.py +15 -7
- transformers/models/musicgen_melody/modeling_musicgen_melody.py +7 -17
- transformers/models/mvp/configuration_mvp.py +8 -4
- transformers/models/mvp/modeling_mvp.py +6 -4
- transformers/models/nanochat/configuration_nanochat.py +5 -7
- transformers/models/nanochat/modeling_nanochat.py +4 -4
- transformers/models/nanochat/modular_nanochat.py +4 -4
- transformers/models/nemotron/configuration_nemotron.py +5 -7
- transformers/models/nemotron/modeling_nemotron.py +4 -14
- transformers/models/nllb/tokenization_nllb.py +7 -5
- transformers/models/nllb_moe/configuration_nllb_moe.py +7 -9
- transformers/models/nllb_moe/modeling_nllb_moe.py +3 -3
- transformers/models/nougat/image_processing_nougat_fast.py +8 -8
- transformers/models/nystromformer/configuration_nystromformer.py +8 -1
- transformers/models/olmo/configuration_olmo.py +5 -7
- transformers/models/olmo/modeling_olmo.py +4 -4
- transformers/models/olmo/modular_olmo.py +3 -3
- transformers/models/olmo2/configuration_olmo2.py +9 -11
- transformers/models/olmo2/modeling_olmo2.py +4 -4
- transformers/models/olmo2/modular_olmo2.py +7 -7
- transformers/models/olmo3/configuration_olmo3.py +10 -11
- transformers/models/olmo3/modeling_olmo3.py +4 -4
- transformers/models/olmo3/modular_olmo3.py +13 -14
- transformers/models/olmoe/configuration_olmoe.py +5 -7
- transformers/models/olmoe/modeling_olmoe.py +4 -4
- transformers/models/olmoe/modular_olmoe.py +3 -3
- transformers/models/omdet_turbo/configuration_omdet_turbo.py +14 -49
- transformers/models/omdet_turbo/modeling_omdet_turbo.py +22 -18
- transformers/models/oneformer/configuration_oneformer.py +9 -46
- transformers/models/oneformer/image_processing_oneformer_fast.py +8 -8
- transformers/models/oneformer/modeling_oneformer.py +14 -9
- transformers/models/openai/configuration_openai.py +16 -0
- transformers/models/opt/configuration_opt.py +6 -6
- transformers/models/opt/modeling_opt.py +5 -5
- transformers/models/ovis2/configuration_ovis2.py +4 -0
- transformers/models/ovis2/image_processing_ovis2_fast.py +3 -3
- transformers/models/ovis2/modeling_ovis2.py +58 -99
- transformers/models/ovis2/modular_ovis2.py +52 -13
- transformers/models/owlv2/configuration_owlv2.py +4 -1
- transformers/models/owlv2/image_processing_owlv2_fast.py +5 -5
- transformers/models/owlv2/modeling_owlv2.py +40 -27
- transformers/models/owlv2/modular_owlv2.py +5 -5
- transformers/models/owlvit/configuration_owlvit.py +4 -1
- transformers/models/owlvit/modeling_owlvit.py +40 -27
- transformers/models/paddleocr_vl/configuration_paddleocr_vl.py +9 -10
- transformers/models/paddleocr_vl/modeling_paddleocr_vl.py +88 -87
- transformers/models/paddleocr_vl/modular_paddleocr_vl.py +82 -53
- transformers/models/paligemma/configuration_paligemma.py +4 -0
- transformers/models/paligemma/modeling_paligemma.py +30 -26
- transformers/models/parakeet/configuration_parakeet.py +2 -4
- transformers/models/parakeet/modeling_parakeet.py +3 -3
- transformers/models/parakeet/modular_parakeet.py +3 -3
- transformers/models/patchtsmixer/modeling_patchtsmixer.py +3 -3
- transformers/models/patchtst/modeling_patchtst.py +3 -3
- transformers/models/pe_audio/modeling_pe_audio.py +4 -4
- transformers/models/pe_audio/modular_pe_audio.py +1 -1
- transformers/models/pe_audio_video/modeling_pe_audio_video.py +4 -4
- transformers/models/pe_audio_video/modular_pe_audio_video.py +4 -4
- transformers/models/pe_video/modeling_pe_video.py +36 -24
- transformers/models/pe_video/modular_pe_video.py +36 -23
- transformers/models/pegasus/configuration_pegasus.py +8 -5
- transformers/models/pegasus/modeling_pegasus.py +4 -4
- transformers/models/pegasus_x/configuration_pegasus_x.py +5 -3
- transformers/models/pegasus_x/modeling_pegasus_x.py +3 -3
- transformers/models/perceiver/image_processing_perceiver_fast.py +2 -2
- transformers/models/perceiver/modeling_perceiver.py +17 -9
- transformers/models/perception_lm/modeling_perception_lm.py +26 -27
- transformers/models/perception_lm/modular_perception_lm.py +27 -25
- transformers/models/persimmon/configuration_persimmon.py +5 -7
- transformers/models/persimmon/modeling_persimmon.py +5 -5
- transformers/models/phi/configuration_phi.py +8 -6
- transformers/models/phi/modeling_phi.py +4 -4
- transformers/models/phi/modular_phi.py +3 -3
- transformers/models/phi3/configuration_phi3.py +9 -11
- transformers/models/phi3/modeling_phi3.py +4 -4
- transformers/models/phi3/modular_phi3.py +3 -3
- transformers/models/phi4_multimodal/configuration_phi4_multimodal.py +11 -13
- transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py +4 -4
- transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +46 -61
- transformers/models/phi4_multimodal/modular_phi4_multimodal.py +44 -30
- transformers/models/phimoe/configuration_phimoe.py +5 -7
- transformers/models/phimoe/modeling_phimoe.py +15 -39
- transformers/models/phimoe/modular_phimoe.py +12 -7
- transformers/models/pix2struct/configuration_pix2struct.py +12 -9
- transformers/models/pix2struct/image_processing_pix2struct_fast.py +5 -5
- transformers/models/pix2struct/modeling_pix2struct.py +14 -7
- transformers/models/pixio/configuration_pixio.py +2 -4
- transformers/models/pixio/modeling_pixio.py +9 -8
- transformers/models/pixio/modular_pixio.py +4 -2
- transformers/models/pixtral/image_processing_pixtral_fast.py +5 -5
- transformers/models/pixtral/modeling_pixtral.py +9 -12
- transformers/models/plbart/configuration_plbart.py +8 -5
- transformers/models/plbart/modeling_plbart.py +9 -7
- transformers/models/plbart/modular_plbart.py +1 -1
- transformers/models/poolformer/image_processing_poolformer_fast.py +7 -7
- transformers/models/pop2piano/configuration_pop2piano.py +7 -6
- transformers/models/pop2piano/modeling_pop2piano.py +2 -1
- transformers/models/pp_doclayout_v3/__init__.py +30 -0
- transformers/models/pp_doclayout_v3/configuration_pp_doclayout_v3.py +277 -0
- transformers/models/pp_doclayout_v3/image_processing_pp_doclayout_v3_fast.py +305 -0
- transformers/models/pp_doclayout_v3/modeling_pp_doclayout_v3.py +2083 -0
- transformers/models/pp_doclayout_v3/modular_pp_doclayout_v3.py +1549 -0
- transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py +12 -46
- transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything_fast.py +6 -6
- transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py +8 -6
- transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py +12 -10
- transformers/models/prophetnet/configuration_prophetnet.py +11 -10
- transformers/models/prophetnet/modeling_prophetnet.py +12 -23
- transformers/models/pvt/image_processing_pvt.py +7 -7
- transformers/models/pvt/image_processing_pvt_fast.py +1 -1
- transformers/models/pvt_v2/configuration_pvt_v2.py +2 -4
- transformers/models/pvt_v2/modeling_pvt_v2.py +6 -5
- transformers/models/qwen2/configuration_qwen2.py +14 -4
- transformers/models/qwen2/modeling_qwen2.py +4 -4
- transformers/models/qwen2/modular_qwen2.py +3 -3
- transformers/models/qwen2/tokenization_qwen2.py +0 -4
- transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py +17 -5
- transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +108 -88
- transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +115 -87
- transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +7 -10
- transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +98 -53
- transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py +18 -6
- transformers/models/qwen2_audio/modeling_qwen2_audio.py +12 -12
- transformers/models/qwen2_moe/configuration_qwen2_moe.py +14 -4
- transformers/models/qwen2_moe/modeling_qwen2_moe.py +4 -4
- transformers/models/qwen2_moe/modular_qwen2_moe.py +3 -3
- transformers/models/qwen2_vl/configuration_qwen2_vl.py +7 -10
- transformers/models/qwen2_vl/image_processing_qwen2_vl_fast.py +4 -6
- transformers/models/qwen2_vl/modeling_qwen2_vl.py +97 -53
- transformers/models/qwen2_vl/video_processing_qwen2_vl.py +4 -6
- transformers/models/qwen3/configuration_qwen3.py +15 -5
- transformers/models/qwen3/modeling_qwen3.py +4 -4
- transformers/models/qwen3/modular_qwen3.py +3 -3
- transformers/models/qwen3_moe/configuration_qwen3_moe.py +20 -7
- transformers/models/qwen3_moe/modeling_qwen3_moe.py +4 -4
- transformers/models/qwen3_next/configuration_qwen3_next.py +16 -4
- transformers/models/qwen3_next/modeling_qwen3_next.py +5 -5
- transformers/models/qwen3_next/modular_qwen3_next.py +4 -4
- transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py +55 -19
- transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +161 -98
- transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +107 -34
- transformers/models/qwen3_vl/configuration_qwen3_vl.py +7 -6
- transformers/models/qwen3_vl/modeling_qwen3_vl.py +115 -49
- transformers/models/qwen3_vl/modular_qwen3_vl.py +88 -37
- transformers/models/qwen3_vl_moe/configuration_qwen3_vl_moe.py +7 -6
- transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py +173 -99
- transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py +23 -7
- transformers/models/rag/configuration_rag.py +6 -6
- transformers/models/rag/modeling_rag.py +3 -3
- transformers/models/rag/retrieval_rag.py +1 -1
- transformers/models/recurrent_gemma/configuration_recurrent_gemma.py +8 -6
- transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +4 -5
- transformers/models/reformer/configuration_reformer.py +7 -7
- transformers/models/rembert/configuration_rembert.py +8 -1
- transformers/models/rembert/modeling_rembert.py +0 -22
- transformers/models/resnet/configuration_resnet.py +2 -4
- transformers/models/resnet/modeling_resnet.py +6 -5
- transformers/models/roberta/configuration_roberta.py +11 -2
- transformers/models/roberta/modeling_roberta.py +6 -6
- transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +11 -2
- transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +6 -6
- transformers/models/roc_bert/configuration_roc_bert.py +8 -1
- transformers/models/roc_bert/modeling_roc_bert.py +6 -41
- transformers/models/roformer/configuration_roformer.py +13 -2
- transformers/models/roformer/modeling_roformer.py +0 -14
- transformers/models/rt_detr/configuration_rt_detr.py +8 -49
- transformers/models/rt_detr/configuration_rt_detr_resnet.py +2 -4
- transformers/models/rt_detr/image_processing_rt_detr_fast.py +24 -11
- transformers/models/rt_detr/modeling_rt_detr.py +578 -737
- transformers/models/rt_detr/modeling_rt_detr_resnet.py +2 -3
- transformers/models/rt_detr/modular_rt_detr.py +1508 -6
- transformers/models/rt_detr_v2/configuration_rt_detr_v2.py +12 -57
- transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +318 -453
- transformers/models/rt_detr_v2/modular_rt_detr_v2.py +25 -66
- transformers/models/rwkv/configuration_rwkv.py +2 -3
- transformers/models/rwkv/modeling_rwkv.py +0 -23
- transformers/models/sam/configuration_sam.py +2 -0
- transformers/models/sam/image_processing_sam_fast.py +4 -4
- transformers/models/sam/modeling_sam.py +13 -8
- transformers/models/sam/processing_sam.py +3 -3
- transformers/models/sam2/configuration_sam2.py +1 -1
- transformers/models/sam2/modeling_sam2.py +56 -52
- transformers/models/sam2/modular_sam2.py +47 -55
- transformers/models/sam2_video/modeling_sam2_video.py +50 -51
- transformers/models/sam2_video/modular_sam2_video.py +12 -10
- transformers/models/sam3/modeling_sam3.py +43 -47
- transformers/models/sam3/processing_sam3.py +8 -4
- transformers/models/sam3_tracker/configuration_sam3_tracker.py +1 -2
- transformers/models/sam3_tracker/modeling_sam3_tracker.py +50 -49
- transformers/models/sam3_tracker/modular_sam3_tracker.py +0 -1
- transformers/models/sam3_tracker/processing_sam3_tracker.py +0 -1
- transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +50 -49
- transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py +10 -22
- transformers/models/sam3_video/modeling_sam3_video.py +27 -14
- transformers/models/sam_hq/configuration_sam_hq.py +2 -0
- transformers/models/sam_hq/modeling_sam_hq.py +13 -9
- transformers/models/sam_hq/modular_sam_hq.py +6 -6
- transformers/models/sam_hq/processing_sam_hq.py +7 -6
- transformers/models/seamless_m4t/configuration_seamless_m4t.py +8 -9
- transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py +8 -9
- transformers/models/seed_oss/configuration_seed_oss.py +7 -9
- transformers/models/seed_oss/modeling_seed_oss.py +4 -4
- transformers/models/seed_oss/modular_seed_oss.py +3 -3
- transformers/models/segformer/image_processing_segformer_fast.py +4 -4
- transformers/models/segformer/modeling_segformer.py +4 -2
- transformers/models/segformer/modular_segformer.py +3 -3
- transformers/models/seggpt/modeling_seggpt.py +20 -8
- transformers/models/sew/configuration_sew.py +4 -1
- transformers/models/sew/modeling_sew.py +9 -5
- transformers/models/sew/modular_sew.py +2 -1
- transformers/models/sew_d/configuration_sew_d.py +4 -1
- transformers/models/sew_d/modeling_sew_d.py +4 -1
- transformers/models/shieldgemma2/modeling_shieldgemma2.py +4 -4
- transformers/models/siglip/configuration_siglip.py +4 -1
- transformers/models/siglip/modeling_siglip.py +27 -71
- transformers/models/siglip2/__init__.py +1 -0
- transformers/models/siglip2/configuration_siglip2.py +4 -2
- transformers/models/siglip2/image_processing_siglip2_fast.py +2 -2
- transformers/models/siglip2/modeling_siglip2.py +37 -78
- transformers/models/siglip2/modular_siglip2.py +74 -25
- transformers/models/siglip2/tokenization_siglip2.py +95 -0
- transformers/models/smollm3/configuration_smollm3.py +6 -6
- transformers/models/smollm3/modeling_smollm3.py +4 -4
- transformers/models/smollm3/modular_smollm3.py +9 -9
- transformers/models/smolvlm/configuration_smolvlm.py +1 -3
- transformers/models/smolvlm/image_processing_smolvlm_fast.py +29 -3
- transformers/models/smolvlm/modeling_smolvlm.py +75 -46
- transformers/models/smolvlm/modular_smolvlm.py +36 -23
- transformers/models/smolvlm/video_processing_smolvlm.py +9 -9
- transformers/models/solar_open/__init__.py +27 -0
- transformers/models/solar_open/configuration_solar_open.py +184 -0
- transformers/models/solar_open/modeling_solar_open.py +642 -0
- transformers/models/solar_open/modular_solar_open.py +224 -0
- transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +6 -4
- transformers/models/speech_to_text/configuration_speech_to_text.py +9 -8
- transformers/models/speech_to_text/modeling_speech_to_text.py +3 -3
- transformers/models/speecht5/configuration_speecht5.py +7 -8
- transformers/models/splinter/configuration_splinter.py +6 -6
- transformers/models/splinter/modeling_splinter.py +8 -3
- transformers/models/squeezebert/configuration_squeezebert.py +14 -1
- transformers/models/stablelm/configuration_stablelm.py +8 -6
- transformers/models/stablelm/modeling_stablelm.py +5 -5
- transformers/models/starcoder2/configuration_starcoder2.py +11 -5
- transformers/models/starcoder2/modeling_starcoder2.py +5 -5
- transformers/models/starcoder2/modular_starcoder2.py +4 -4
- transformers/models/superglue/configuration_superglue.py +4 -0
- transformers/models/superglue/image_processing_superglue_fast.py +4 -3
- transformers/models/superglue/modeling_superglue.py +9 -4
- transformers/models/superpoint/image_processing_superpoint_fast.py +3 -4
- transformers/models/superpoint/modeling_superpoint.py +4 -2
- transformers/models/swin/configuration_swin.py +2 -4
- transformers/models/swin/modeling_swin.py +11 -8
- transformers/models/swin2sr/image_processing_swin2sr_fast.py +2 -2
- transformers/models/swin2sr/modeling_swin2sr.py +4 -2
- transformers/models/swinv2/configuration_swinv2.py +2 -4
- transformers/models/swinv2/modeling_swinv2.py +10 -7
- transformers/models/switch_transformers/configuration_switch_transformers.py +11 -6
- transformers/models/switch_transformers/modeling_switch_transformers.py +3 -3
- transformers/models/switch_transformers/modular_switch_transformers.py +3 -3
- transformers/models/t5/configuration_t5.py +9 -8
- transformers/models/t5/modeling_t5.py +5 -8
- transformers/models/t5gemma/configuration_t5gemma.py +10 -25
- transformers/models/t5gemma/modeling_t5gemma.py +9 -9
- transformers/models/t5gemma/modular_t5gemma.py +11 -24
- transformers/models/t5gemma2/configuration_t5gemma2.py +35 -48
- transformers/models/t5gemma2/modeling_t5gemma2.py +143 -100
- transformers/models/t5gemma2/modular_t5gemma2.py +152 -136
- transformers/models/table_transformer/configuration_table_transformer.py +18 -49
- transformers/models/table_transformer/modeling_table_transformer.py +27 -53
- transformers/models/tapas/configuration_tapas.py +12 -1
- transformers/models/tapas/modeling_tapas.py +1 -1
- transformers/models/tapas/tokenization_tapas.py +1 -0
- transformers/models/textnet/configuration_textnet.py +4 -6
- transformers/models/textnet/image_processing_textnet_fast.py +3 -3
- transformers/models/textnet/modeling_textnet.py +15 -14
- transformers/models/time_series_transformer/modeling_time_series_transformer.py +3 -3
- transformers/models/timesfm/modeling_timesfm.py +5 -6
- transformers/models/timesfm/modular_timesfm.py +5 -6
- transformers/models/timm_backbone/configuration_timm_backbone.py +33 -7
- transformers/models/timm_backbone/modeling_timm_backbone.py +21 -24
- transformers/models/timm_wrapper/modeling_timm_wrapper.py +9 -4
- transformers/models/trocr/configuration_trocr.py +11 -7
- transformers/models/trocr/modeling_trocr.py +4 -2
- transformers/models/tvp/configuration_tvp.py +10 -35
- transformers/models/tvp/image_processing_tvp_fast.py +6 -5
- transformers/models/tvp/modeling_tvp.py +1 -1
- transformers/models/udop/configuration_udop.py +16 -7
- transformers/models/udop/modeling_udop.py +10 -6
- transformers/models/umt5/configuration_umt5.py +8 -6
- transformers/models/umt5/modeling_umt5.py +7 -3
- transformers/models/unispeech/configuration_unispeech.py +4 -1
- transformers/models/unispeech/modeling_unispeech.py +7 -4
- transformers/models/unispeech_sat/configuration_unispeech_sat.py +4 -1
- transformers/models/unispeech_sat/modeling_unispeech_sat.py +7 -4
- transformers/models/upernet/configuration_upernet.py +8 -35
- transformers/models/upernet/modeling_upernet.py +1 -1
- transformers/models/vaultgemma/configuration_vaultgemma.py +5 -7
- transformers/models/vaultgemma/modeling_vaultgemma.py +4 -4
- transformers/models/video_llama_3/configuration_video_llama_3.py +4 -0
- transformers/models/video_llama_3/image_processing_video_llama_3_fast.py +4 -6
- transformers/models/video_llama_3/modeling_video_llama_3.py +85 -48
- transformers/models/video_llama_3/modular_video_llama_3.py +56 -43
- transformers/models/video_llama_3/video_processing_video_llama_3.py +29 -8
- transformers/models/video_llava/configuration_video_llava.py +4 -0
- transformers/models/video_llava/modeling_video_llava.py +87 -89
- transformers/models/videomae/modeling_videomae.py +4 -5
- transformers/models/vilt/configuration_vilt.py +4 -1
- transformers/models/vilt/image_processing_vilt_fast.py +6 -6
- transformers/models/vilt/modeling_vilt.py +27 -12
- transformers/models/vipllava/configuration_vipllava.py +4 -0
- transformers/models/vipllava/modeling_vipllava.py +57 -31
- transformers/models/vipllava/modular_vipllava.py +50 -24
- transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +10 -6
- transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +27 -20
- transformers/models/visual_bert/configuration_visual_bert.py +6 -1
- transformers/models/vit/configuration_vit.py +2 -2
- transformers/models/vit/modeling_vit.py +7 -5
- transformers/models/vit_mae/modeling_vit_mae.py +11 -7
- transformers/models/vit_msn/modeling_vit_msn.py +11 -7
- transformers/models/vitdet/configuration_vitdet.py +2 -4
- transformers/models/vitdet/modeling_vitdet.py +2 -3
- transformers/models/vitmatte/configuration_vitmatte.py +6 -35
- transformers/models/vitmatte/image_processing_vitmatte_fast.py +2 -2
- transformers/models/vitmatte/modeling_vitmatte.py +1 -1
- transformers/models/vitpose/configuration_vitpose.py +6 -43
- transformers/models/vitpose/modeling_vitpose.py +5 -3
- transformers/models/vitpose_backbone/configuration_vitpose_backbone.py +2 -4
- transformers/models/vitpose_backbone/modeling_vitpose_backbone.py +5 -6
- transformers/models/vits/configuration_vits.py +4 -0
- transformers/models/vits/modeling_vits.py +9 -7
- transformers/models/vivit/modeling_vivit.py +4 -4
- transformers/models/vjepa2/modeling_vjepa2.py +9 -9
- transformers/models/voxtral/configuration_voxtral.py +0 -1
- transformers/models/voxtral/modeling_voxtral.py +25 -24
- transformers/models/voxtral/modular_voxtral.py +26 -20
- transformers/models/wav2vec2/configuration_wav2vec2.py +4 -1
- transformers/models/wav2vec2/modeling_wav2vec2.py +7 -4
- transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py +4 -1
- transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +4 -1
- transformers/models/wavlm/configuration_wavlm.py +4 -1
- transformers/models/wavlm/modeling_wavlm.py +4 -1
- transformers/models/whisper/configuration_whisper.py +6 -4
- transformers/models/whisper/generation_whisper.py +0 -1
- transformers/models/whisper/modeling_whisper.py +3 -3
- transformers/models/x_clip/configuration_x_clip.py +4 -1
- transformers/models/x_clip/modeling_x_clip.py +26 -27
- transformers/models/xglm/configuration_xglm.py +9 -7
- transformers/models/xlm/configuration_xlm.py +10 -7
- transformers/models/xlm/modeling_xlm.py +1 -1
- transformers/models/xlm_roberta/configuration_xlm_roberta.py +11 -2
- transformers/models/xlm_roberta/modeling_xlm_roberta.py +6 -6
- transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +10 -1
- transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +6 -6
- transformers/models/xlnet/configuration_xlnet.py +3 -1
- transformers/models/xlstm/configuration_xlstm.py +5 -7
- transformers/models/xlstm/modeling_xlstm.py +0 -32
- transformers/models/xmod/configuration_xmod.py +11 -2
- transformers/models/xmod/modeling_xmod.py +13 -16
- transformers/models/yolos/image_processing_yolos_fast.py +25 -28
- transformers/models/yolos/modeling_yolos.py +7 -7
- transformers/models/yolos/modular_yolos.py +16 -16
- transformers/models/yoso/configuration_yoso.py +8 -1
- transformers/models/youtu/__init__.py +27 -0
- transformers/models/youtu/configuration_youtu.py +194 -0
- transformers/models/youtu/modeling_youtu.py +619 -0
- transformers/models/youtu/modular_youtu.py +254 -0
- transformers/models/zamba/configuration_zamba.py +5 -7
- transformers/models/zamba/modeling_zamba.py +25 -56
- transformers/models/zamba2/configuration_zamba2.py +8 -13
- transformers/models/zamba2/modeling_zamba2.py +53 -78
- transformers/models/zamba2/modular_zamba2.py +36 -29
- transformers/models/zoedepth/configuration_zoedepth.py +17 -40
- transformers/models/zoedepth/image_processing_zoedepth_fast.py +9 -9
- transformers/models/zoedepth/modeling_zoedepth.py +5 -3
- transformers/pipelines/__init__.py +1 -61
- transformers/pipelines/any_to_any.py +1 -1
- transformers/pipelines/automatic_speech_recognition.py +0 -2
- transformers/pipelines/base.py +1 -1
- transformers/pipelines/image_text_to_text.py +1 -1
- transformers/pipelines/text_to_audio.py +5 -1
- transformers/processing_utils.py +35 -44
- transformers/pytorch_utils.py +2 -26
- transformers/quantizers/quantizer_compressed_tensors.py +7 -5
- transformers/quantizers/quantizer_fbgemm_fp8.py +20 -23
- transformers/quantizers/quantizer_finegrained_fp8.py +14 -20
- transformers/quantizers/quantizer_mxfp4.py +1 -1
- transformers/quantizers/quantizer_torchao.py +0 -16
- transformers/safetensors_conversion.py +11 -4
- transformers/testing_utils.py +3 -28
- transformers/tokenization_mistral_common.py +9 -0
- transformers/tokenization_python.py +6 -4
- transformers/tokenization_utils_base.py +119 -219
- transformers/tokenization_utils_tokenizers.py +31 -2
- transformers/trainer.py +25 -33
- transformers/trainer_seq2seq.py +1 -1
- transformers/training_args.py +411 -417
- transformers/utils/__init__.py +1 -4
- transformers/utils/auto_docstring.py +15 -18
- transformers/utils/backbone_utils.py +13 -373
- transformers/utils/doc.py +4 -36
- transformers/utils/generic.py +69 -33
- transformers/utils/import_utils.py +72 -75
- transformers/utils/loading_report.py +133 -105
- transformers/utils/quantization_config.py +0 -21
- transformers/video_processing_utils.py +5 -5
- transformers/video_utils.py +3 -1
- {transformers-5.0.0rc3.dist-info → transformers-5.1.0.dist-info}/METADATA +118 -237
- {transformers-5.0.0rc3.dist-info → transformers-5.1.0.dist-info}/RECORD +1019 -994
- {transformers-5.0.0rc3.dist-info → transformers-5.1.0.dist-info}/WHEEL +1 -1
- transformers/pipelines/deprecated/text2text_generation.py +0 -408
- transformers/pipelines/image_to_text.py +0 -189
- {transformers-5.0.0rc3.dist-info → transformers-5.1.0.dist-info}/entry_points.txt +0 -0
- {transformers-5.0.0rc3.dist-info → transformers-5.1.0.dist-info}/licenses/LICENSE +0 -0
- {transformers-5.0.0rc3.dist-info → transformers-5.1.0.dist-info}/top_level.txt +0 -0
@@ -59,7 +59,6 @@ from .base import (
     get_default_model_and_revision,
     load_model,
 )
-from .deprecated import SummarizationPipeline, Text2TextGenerationPipeline, TranslationPipeline
 from .depth_estimation import DepthEstimationPipeline
 from .document_question_answering import DocumentQuestionAnsweringPipeline
 from .feature_extraction import FeatureExtractionPipeline
@@ -69,7 +68,6 @@ from .image_feature_extraction import ImageFeatureExtractionPipeline
 from .image_segmentation import ImageSegmentationPipeline
 from .image_text_to_text import ImageTextToTextPipeline
 from .image_to_image import ImageToImagePipeline
-from .image_to_text import ImageToTextPipeline
 from .keypoint_matching import KeypointMatchingPipeline
 from .mask_generation import MaskGenerationPipeline
 from .object_detection import ObjectDetectionPipeline
@@ -207,29 +205,6 @@ SUPPORTED_TASKS = {
         "default": {"model": ("distilbert/distilroberta-base", "fb53ab8")},
         "type": "text",
     },
-    "summarization": {
-        "impl": SummarizationPipeline,
-        "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
-        "default": {"model": ("sshleifer/distilbart-cnn-12-6", "a4f8f3e")},
-        "type": "text",
-    },
-    # This task is a special case as it's parametrized by SRC, TGT languages.
-    "translation": {
-        "impl": TranslationPipeline,
-        "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
-        "default": {
-            ("en", "fr"): {"model": ("google-t5/t5-base", "a9723ea")},
-            ("en", "de"): {"model": ("google-t5/t5-base", "a9723ea")},
-            ("en", "ro"): {"model": ("google-t5/t5-base", "a9723ea")},
-        },
-        "type": "text",
-    },
-    "text2text-generation": {
-        "impl": Text2TextGenerationPipeline,
-        "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
-        "default": {"model": ("google-t5/t5-base", "a9723ea")},
-        "type": "text",
-    },
     "text-generation": {
         "impl": TextGenerationPipeline,
         "pt": (AutoModelForCausalLM,) if is_torch_available() else (),
@@ -275,12 +250,6 @@ SUPPORTED_TASKS = {
         "default": {"model": ("facebook/detr-resnet-50-panoptic", "d53b52a")},
         "type": "multimodal",
     },
-    "image-to-text": {
-        "impl": ImageToTextPipeline,
-        "pt": (AutoModelForImageTextToText,) if is_torch_available() else (),
-        "default": {"model": ("ydshieh/vit-gpt2-coco-en", "e460201")},
-        "type": "multimodal",
-    },
     "image-text-to-text": {
         "impl": ImageTextToTextPipeline,
         "pt": (AutoModelForImageTextToText,) if is_torch_available() else (),
@@ -388,20 +357,15 @@ def check_task(task: str) -> tuple[str, dict, Any]:
            - `"image-classification"`
            - `"image-feature-extraction"`
            - `"image-segmentation"`
-           - `"image-to-text"`
            - `"image-to-image"`
            - `"keypoint-matching"`
            - `"object-detection"`
            - `"question-answering"`
-           - `"summarization"`
            - `"table-question-answering"`
-           - `"text2text-generation"`
            - `"text-classification"` (alias `"sentiment-analysis"` available)
            - `"text-generation"`
            - `"text-to-audio"` (alias `"text-to-speech"` available)
            - `"token-classification"` (alias `"ner"` available)
-           - `"translation"`
-           - `"translation_xx_to_yy"`
            - `"video-classification"`
            - `"visual-question-answering"` (alias `"vqa"` available)
            - `"zero-shot-classification"`
@@ -410,8 +374,7 @@ def check_task(task: str) -> tuple[str, dict, Any]:

     Returns:
         (normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name
-        (removed alias and options).
-        options for parametrized tasks like "translation_xx_to_yy"
+        (removed alias and options).


     """
@@ -469,8 +432,6 @@ def pipeline(task: Literal["image-text-to-text"], model: str | PreTrainedModel |
 @overload
 def pipeline(task: Literal["image-to-image"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> ImageToImagePipeline: ...
 @overload
-def pipeline(task: Literal["image-to-text"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> ImageToTextPipeline: ...
-@overload
 def pipeline(task: Literal["keypoint-matching"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> KeypointMatchingPipeline: ...
 @overload
 def pipeline(task: Literal["mask-generation"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> MaskGenerationPipeline: ...
@@ -479,8 +440,6 @@ def pipeline(task: Literal["object-detection"], model: str | PreTrainedModel | N
 @overload
 def pipeline(task: Literal["question-answering"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> QuestionAnsweringPipeline: ...
 @overload
-def pipeline(task: Literal["summarization"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> SummarizationPipeline: ...
-@overload
 def pipeline(task: Literal["table-question-answering"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> TableQuestionAnsweringPipeline: ...
 @overload
 def pipeline(task: Literal["text-classification"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> TextClassificationPipeline: ...
@@ -489,12 +448,8 @@ def pipeline(task: Literal["text-generation"], model: str | PreTrainedModel | No
 @overload
 def pipeline(task: Literal["text-to-audio"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> TextToAudioPipeline: ...
 @overload
-def pipeline(task: Literal["text2text-generation"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> Text2TextGenerationPipeline: ...
-@overload
 def pipeline(task: Literal["token-classification"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> TokenClassificationPipeline: ...
 @overload
-def pipeline(task: Literal["translation"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> TranslationPipeline: ...
-@overload
 def pipeline(task: Literal["video-classification"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> VideoClassificationPipeline: ...
 @overload
 def pipeline(task: Literal["visual-question-answering"], model: str | PreTrainedModel | None = None, config: str | PreTrainedConfig | None = None, tokenizer: str | PreTrainedTokenizer | PreTrainedTokenizerFast | None = None, feature_extractor: str | PreTrainedFeatureExtractor | None = None, image_processor: str | BaseImageProcessor | None = None, processor: str | ProcessorMixin | None = None, revision: str | None = None, use_fast: bool = True, token: str | bool | None = None, device: int | str | torch.device | None = None, device_map: str | dict[str, int | str] | None = None, dtype: str | torch.dtype | None = "auto", trust_remote_code: bool | None = None, model_kwargs: dict[str, Any] | None = None, pipeline_class: Any | None = None, **kwargs: Any) -> VisualQuestionAnsweringPipeline: ...
@@ -565,21 +520,16 @@ def pipeline(
            - `"image-segmentation"`: will return a [`ImageSegmentationPipeline`].
            - `"image-text-to-text"`: will return a [`ImageTextToTextPipeline`].
            - `"image-to-image"`: will return a [`ImageToImagePipeline`].
-           - `"image-to-text"`: will return a [`ImageToTextPipeline`].
            - `"keypoint-matching"`: will return a [`KeypointMatchingPipeline`].
            - `"mask-generation"`: will return a [`MaskGenerationPipeline`].
            - `"object-detection"`: will return a [`ObjectDetectionPipeline`].
            - `"question-answering"`: will return a [`QuestionAnsweringPipeline`].
-           - `"summarization"`: will return a [`SummarizationPipeline`].
            - `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`].
-           - `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`].
            - `"text-classification"` (alias `"sentiment-analysis"` available): will return a
              [`TextClassificationPipeline`].
            - `"text-generation"`: will return a [`TextGenerationPipeline`]:.
            - `"text-to-audio"` (alias `"text-to-speech"` available): will return a [`TextToAudioPipeline`]:.
            - `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`].
-           - `"translation"`: will return a [`TranslationPipeline`].
-           - `"translation_xx_to_yy"`: will return a [`TranslationPipeline`].
            - `"video-classification"`: will return a [`VideoClassificationPipeline`].
            - `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`].
            - `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`].
@@ -1057,16 +1007,6 @@ def pipeline(
     else:
         processor = None

-    if task == "translation" and model.config.task_specific_params:
-        for key in model.config.task_specific_params:
-            if key.startswith("translation"):
-                task = key
-                warnings.warn(
-                    f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"',
-                    UserWarning,
-                )
-                break
-
     if tokenizer is not None:
         kwargs["tokenizer"] = tokenizer

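The hunks above delete the deprecated seq2seq task plumbing end to end: imports, SUPPORTED_TASKS entries, docstring mentions, typing overloads, and the automatic "translation" to "translation_xx_to_yy" rewrite. A minimal sketch of the user-visible effect in 5.1.0 (the exact exception type is an assumption; the lookup failure surfaces as KeyError or ValueError depending on where it happens):

from transformers import pipeline

# "summarization", "translation", "text2text-generation" and "image-to-text" are no
# longer registered tasks, so constructing them presumably fails at task lookup:
try:
    pipe = pipeline("summarization")  # returned a SummarizationPipeline in 5.0.0rc3
except (KeyError, ValueError) as err:
    print(f"task no longer supported: {err}")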
@@ -386,7 +386,7 @@ class AnyToAnyPipeline(Pipeline):
         text = inputs.pop("text")

         # Feature extractor do not load audio files and expect a decode array
-        if "audio"
+        if inputs.get("audio", None) is not None and hasattr(self.processor, "feature_extractor"):
             inputs["audio"] = self.processor.feature_extractor.fetch_audio(inputs["audio"])

         # If batched text inputs, we set padding to True unless specified otherwise
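The new guard distinguishes a missing "audio" key from an explicit audio=None, which a plain membership test would not. A small sketch of the difference:

inputs = {"audio": None, "text": "hi"}

print("audio" in inputs)                      # True  -> membership alone would try to process audio
print(inputs.get("audio", None) is not None)  # False -> the new check skips audio handling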
@@ -480,10 +480,8 @@ class AutomaticSpeechRecognitionPipeline(ChunkPipeline):
                 inputs,
                 sampling_rate=self.feature_extractor.sampling_rate,
                 return_tensors="pt",
-                return_token_timestamps=True,
                 return_attention_mask=True,
             )
-            extra["num_frames"] = processed.pop("num_frames")
         else:
             processed = self.feature_extractor(
                 inputs,
transformers/pipelines/base.py
CHANGED
@@ -901,7 +901,7 @@ class Pipeline(_ScikitCompat, PushToHubMixin):
         # Update the generation config with task specific params if they exist.
         # NOTE: 1. `prefix` is pipeline-specific and doesn't exist in the generation config.
         # 2. `task_specific_params` is a legacy feature and should be removed in a future version.
-        task_specific_params = self.model.config
+        task_specific_params = getattr(self.model.config, "task_specific_params", None)
         if task_specific_params is not None and task in task_specific_params:
             this_task_params = task_specific_params.get(task)
             if "prefix" in this_task_params:
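getattr with a None default makes the lookup safe on config classes that no longer carry the legacy attribute (v5 trimmed configuration_utils.py heavily, per the file list above). A minimal sketch of the pattern, using a stand-in object rather than a real config:

from types import SimpleNamespace

config = SimpleNamespace()  # stand-in for a config without `task_specific_params`
task_specific_params = getattr(config, "task_specific_params", None)
print(task_specific_params)  # None -- direct attribute access would raise AttributeError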
@@ -152,8 +152,12 @@ class TextToAudioPipeline(Pipeline):

         if self.model.config.model_type == "bark":
             # bark Tokenizer is called with BarkProcessor which uses those kwargs
+            # Check if generation_config has semantic_config (BarkGenerationConfig) or use default
+            max_length = 256
+            if hasattr(self.generation_config, "semantic_config"):
+                max_length = getattr(self.generation_config.semantic_config, "max_input_semantic_length", 256)
             new_kwargs = {
-                "max_length":
+                "max_length": max_length,
                 "add_special_tokens": False,
                 "return_attention_mask": True,
                 "return_token_type_ids": False,
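The resolution order introduced above: take max_input_semantic_length from the generation config's semantic_config when present, otherwise fall back to 256. A toy sketch with stand-in classes (the real objects are Bark generation configs):

class _Semantic:        # stand-in for the Bark semantic generation config
    max_input_semantic_length = 512

class _GenConfig:       # stand-in for BarkGenerationConfig
    semantic_config = _Semantic()

generation_config = _GenConfig()
max_length = 256
if hasattr(generation_config, "semantic_config"):
    max_length = getattr(generation_config.semantic_config, "max_input_semantic_length", 256)
print(max_length)  # 512; stays 256 for generation configs without semantic_config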
transformers/processing_utils.py
CHANGED
@@ -508,22 +508,7 @@ class TokenizerChatTemplateKwargs(TypedDict, total=False):
     return_assistant_tokens_mask: bool | None = False


-class
-    """
-    Keyword arguments used to load multimodal data in processor chat templates.
-
-    num_frames (`int`, *optional*):
-        Number of frames to sample uniformly. If not passed, the whole video is loaded.
-    load_audio_from_video (`bool`, *optional*):
-        Whether to use the audio track of input video. If `True` the audio track will be loaded and passed to the
-        processor. This flag has no effect if the model doesn't support audio modality.
-    """
-
-    sampling_rate: int | None = 16_000
-    load_audio_from_video: bool | None = False
-
-
-class ProcessorChatTemplateKwargs(ChatTemplateLoadKwargs, TokenizerChatTemplateKwargs, total=False):
+class ProcessorChatTemplateKwargs(TokenizerChatTemplateKwargs, total=False):
     """
     Keyword arguments for processor's `apply_chat_template`.

@@ -531,15 +516,18 @@ class ProcessorChatTemplateKwargs(ChatTemplateLoadKwargs, TokenizerChatTemplateK
         Whether to tokenize the output or not.
     return_dict (`bool`, defaults to `False`):
         Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
+    load_audio_from_video (`bool`, *optional*, defaults to `False`):
+        Whether to use the audio track of input video. If `True` the audio track will be loaded and passed to the
+        processor. This flag has no effect if the model doesn't support audio modality.
     """

     tokenize: bool | None = False
     return_dict: bool | None = False
+    load_audio_from_video: bool | None = False


 class AllKwargsForChatTemplate(TypedDict, total=False):
     processor_kwargs: ProcessingKwargs
-    mm_load_kwargs: ChatTemplateLoadKwargs
     template_kwargs: ProcessorChatTemplateKwargs


@@ -1233,7 +1221,8 @@ class ProcessorMixin(PushToHubMixin):

         """
         # holding a copy to avoid mutating user-provided arguments
-
+        # Use deepcopy to also copy nested dicts (like videos_kwargs) that will be modified via pop()
+        kwargs = copy.deepcopy(kwargs)

         # Initialize dictionaries
         output_kwargs = {
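The deepcopy matters because the processor later pops entries out of nested dicts such as videos_kwargs; a shallow copy shares those inner dicts with the caller. A self-contained sketch of the bug the change fixes:

import copy

user_kwargs = {"videos_kwargs": {"num_frames": 8}}
shallow = copy.copy(user_kwargs)
shallow["videos_kwargs"].pop("num_frames")
print(user_kwargs)   # {'videos_kwargs': {}} -- the caller's dict was mutated

user_kwargs = {"videos_kwargs": {"num_frames": 8}}
deep = copy.deepcopy(user_kwargs)
deep["videos_kwargs"].pop("num_frames")
print(user_kwargs)   # {'videos_kwargs': {'num_frames': 8}} -- preserved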
@@ -1520,7 +1509,9 @@ class ProcessorMixin(PushToHubMixin):
         if "PixtralProcessor" in cls.__name__:
             from .tokenization_utils_tokenizers import TokenizersBackend

-            tokenizer = TokenizersBackend.from_pretrained(
+            tokenizer = TokenizersBackend.from_pretrained(
+                pretrained_model_name_or_path, subfolder=subfolder, **kwargs
+            )
         else:
             tokenizer = cls._load_tokenizer_from_pretrained(
                 sub_processor_type, pretrained_model_name_or_path, subfolder=subfolder, **kwargs
@@ -1708,22 +1699,25 @@ class ProcessorMixin(PushToHubMixin):
         else:
             kwargs["return_offsets_mapping"] = True  # force offset mapping so we can infer token boundaries

-        # Fill sets of kwargs that should be used by
-
-
-
-
-
-
-
-            default_value = getattr(kwarg_type_defaults, key, None)
-            value = kwargs.pop(key, default_value)
-            if value is not None and not isinstance(value, dict):
-                processed_kwargs[kwarg_type][key] = value
+        # Fill sets of kwargs that should be used by jinja template, filtering out kwargs used in `processor.__call__`
+        # NOTE: we don't only filter but also set the default values here. Without default values, we can remove it
+        template_kwargs = {}
+        for key in AllKwargsForChatTemplate.__annotations__["template_kwargs"].__annotations__:
+            kwarg_type_defaults = AllKwargsForChatTemplate.__annotations__["template_kwargs"]
+            default_value = getattr(kwarg_type_defaults, key, None)
+            value = kwargs.pop(key, default_value)
+            if value is not None and not isinstance(value, dict):
+                template_kwargs[key] = value

         # Pass unprocessed custom kwargs
-
+        template_kwargs.update(kwargs)
+
+        # Set the sampling rate to load the audio files if user hasn't already passed with `kwargs`
+        if "sampling_rate" not in template_kwargs:
+            if hasattr(self, "feature_extractor") and hasattr(self.feature_extractor, "sampling_rate"):
+                template_kwargs["sampling_rate"] = self.feature_extractor.sampling_rate
+            else:
+                template_kwargs["sampling_rate"] = 16_000

         if isinstance(conversation, (list, tuple)) and (
             isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "content")
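The loop reads defaults straight off the TypedDict class (transformers stores them as class attributes, as the ProcessorChatTemplateKwargs hunk shows) and forwards any unrecognized kwargs untouched. A self-contained sketch of the same mechanics with a toy kwargs class:

from typing import TypedDict

class ToyTemplateKwargs(TypedDict, total=False):
    tokenize: bool | None = False      # defaults as class attributes, mirroring the diff
    return_dict: bool | None = True

incoming = {"tokenize": True, "my_custom_flag": 1}
template_kwargs = {}
for key in ToyTemplateKwargs.__annotations__:
    value = incoming.pop(key, getattr(ToyTemplateKwargs, key, None))
    if value is not None and not isinstance(value, dict):
        template_kwargs[key] = value
template_kwargs.update(incoming)       # unprocessed custom kwargs pass through
print(template_kwargs)  # {'tokenize': True, 'return_dict': True, 'my_custom_flag': 1}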
@@ -1734,9 +1728,8 @@ class ProcessorMixin(PushToHubMixin):
             is_batched = False
             conversations = [conversation]

-        tokenize =
-        return_dict =
-        mm_load_kwargs = processed_kwargs["mm_load_kwargs"]
+        tokenize = template_kwargs.pop("tokenize", False)
+        return_dict = template_kwargs.pop("return_dict", True)

         if tokenize:
             batch_images, batch_videos = [], []
@@ -1767,12 +1760,12 @@ class ProcessorMixin(PushToHubMixin):
                     videos.extend(video_fnames)

             # Audio models do not accept nested list of audios (yet!) so we construct a flat input audio list
-            if not
+            if not template_kwargs["load_audio_from_video"]:
                 for fname in audio_fnames:
-                    batch_audios.append(load_audio(fname, sampling_rate=
+                    batch_audios.append(load_audio(fname, sampling_rate=template_kwargs["sampling_rate"]))
             else:
                 for fname in video_fnames:
-                    batch_audios.append(load_audio(fname, sampling_rate=
+                    batch_audios.append(load_audio(fname, sampling_rate=template_kwargs["sampling_rate"]))

             # Currently all processors can accept nested list of batches, but not flat list of visuals
             # So we'll make a batched list of images and let the processor handle it
@@ -1783,14 +1776,12 @@ class ProcessorMixin(PushToHubMixin):
         if hasattr(self, "tokenizer") and hasattr(self.tokenizer, "special_tokens_map"):
             special_tokens = self.tokenizer.special_tokens_map
             # Filter out tokens that conflict with template kwargs
-            special_tokens_map = {
-                k: v for k, v in special_tokens.items() if k not in processed_kwargs["template_kwargs"]
-            }
+            special_tokens_map = {k: v for k, v in special_tokens.items() if k not in template_kwargs}

         prompt, generation_indices = render_jinja_template(
             conversations=conversations,
             chat_template=chat_template,
-            **
+            **template_kwargs,  # different flags such as `return_assistant_mask`
             **special_tokens_map,  # tokenizer special tokens are used by some templates
         )

@@ -1826,7 +1817,7 @@ class ProcessorMixin(PushToHubMixin):
         )

         if return_dict:
-            if
+            if template_kwargs.get("return_assistant_tokens_mask", False):
                 assistant_masks = []
                 offset_mapping = out.pop("offset_mapping")
                 input_ids = out["input_ids"]
transformers/pytorch_utils.py
CHANGED
@@ -35,10 +35,10 @@ logger = logging.get_logger(__name__)

 is_torch_greater_or_equal_than_2_8 = is_torch_greater_or_equal("2.8", accept_dev=True)
 is_torch_greater_or_equal_than_2_6 = is_torch_greater_or_equal("2.6", accept_dev=True)
-is_torch_greater_or_equal_than_2_4 = is_torch_greater_or_equal("2.4", accept_dev=True)
-is_torch_greater_or_equal_than_2_3 = is_torch_greater_or_equal("2.3", accept_dev=True)

 # For backwards compatibility (e.g. some remote codes on Hub using those variables).
+is_torch_greater_or_equal_than_2_4 = is_torch_greater_or_equal("2.4", accept_dev=True)
+is_torch_greater_or_equal_than_2_3 = is_torch_greater_or_equal("2.3", accept_dev=True)
 is_torch_greater_or_equal_than_2_2 = is_torch_greater_or_equal("2.2", accept_dev=True)
 is_torch_greater_or_equal_than_2_1 = is_torch_greater_or_equal("2.1", accept_dev=True)
 is_torch_greater_or_equal_than_2_0 = is_torch_greater_or_equal("2.0", accept_dev=True)
@@ -238,30 +238,6 @@ def id_tensor_storage(tensor: torch.Tensor) -> tuple[torch.device, int, int]:
     return tensor.device, unique_id, storage_size(tensor)


-def isin_mps_friendly(elements: torch.Tensor, test_elements: torch.Tensor | int) -> torch.Tensor:
-    """
-    Same as `torch.isin` without flags, but MPS-friendly. We can remove this function when we stop supporting
-    torch <= 2.3. See https://github.com/pytorch/pytorch/issues/77764#issuecomment-2067838075
-
-    Args:
-        elements (`torch.Tensor`): Input elements
-        test_elements (`torch.Tensor` or `int`): The elements to check against.
-
-    Returns:
-        `torch.Tensor`: A boolean tensor of the same shape as `elements` that is True for `elements` in `test_elements`
-        and False otherwise
-    """
-
-    if elements.device.type == "mps" and not is_torch_greater_or_equal_than_2_4:
-        test_elements = torch.tensor(test_elements)
-        if test_elements.ndim == 0:
-            test_elements = test_elements.unsqueeze(0)
-        return elements.tile(test_elements.shape[0], 1).eq(test_elements.unsqueeze(1)).sum(dim=0).bool().squeeze()
-    else:
-        # Note: don't use named arguments in `torch.isin`, see https://github.com/pytorch/pytorch/issues/126045
-        return torch.isin(elements, test_elements)
-
-
 @wraps(lru_cache)
 def compile_compatible_method_lru_cache(*lru_args, **lru_kwargs):
     """
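With the pre-2.4 MPS workaround gone, callers can use torch.isin directly:

import torch

elements = torch.tensor([1, 2, 3, 4])
test_elements = torch.tensor([2, 4])
# Positional arguments on purpose -- the removed helper's comment notes that named
# arguments to torch.isin were buggy (pytorch issue #126045).
print(torch.isin(elements, test_elements))  # tensor([False,  True, False,  True])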
@@ -84,13 +84,15 @@ class CompressedTensorsHfQuantizer(HfQuantizer):
         ) or self.quantization_config.is_sparsification_compressed:
             self.compressor.decompress_model(model=model)

+    # NOTE: TP plan override for compressed tensors removed - unsupported styles were used.
+    # TODO: Implement proper TP support for compressed tensors quantization
     def update_tp_plan(self, config):
         additional_plan = {
-            "layers.*.feed_forward.experts.*.gate_proj.weight": "
-            "layers.*.feed_forward.experts.*.gate_proj.weight_scale": "
-            "layers.*.feed_forward.experts.*.up_proj.weight": "
-            "layers.*.feed_forward.experts.*.up_proj.weight_scale": "
-            "layers.*.feed_forward.experts.*.down_proj.weight": "
+            "layers.*.feed_forward.experts.*.gate_proj.weight": "colwise",
+            "layers.*.feed_forward.experts.*.gate_proj.weight_scale": "colwise",
+            "layers.*.feed_forward.experts.*.up_proj.weight": "colwise",
+            "layers.*.feed_forward.experts.*.up_proj.weight_scale": "colwise",
+            "layers.*.feed_forward.experts.*.down_proj.weight": "rowwise",
         }
         if config.get_text_config() is not None and config.get_text_config().base_model_tp_plan is not None:
             config.get_text_config().base_model_tp_plan.update(additional_plan)
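update_tp_plan is a plain dict merge: the quantizer's pattern-to-strategy entries overlay the model's base_model_tp_plan, keyed by wildcard parameter paths. A minimal sketch (the additional-plan entries come from the hunk above; the base-plan entries are illustrative):

base_model_tp_plan = {
    "layers.*.self_attn.q_proj": "colwise",                               # illustrative
    "layers.*.feed_forward.experts.*.gate_proj.weight": "local_colwise",  # illustrative
}
additional_plan = {
    "layers.*.feed_forward.experts.*.gate_proj.weight": "colwise",  # overrides the base entry
    "layers.*.feed_forward.experts.*.down_proj.weight": "rowwise",  # new entry
}
base_model_tp_plan.update(additional_plan)
print(base_model_tp_plan["layers.*.feed_forward.experts.*.gate_proj.weight"])  # colwise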
@@ -133,14 +133,13 @@ class FbgemmFp8HfQuantizer(HfQuantizer):
             # We are using a different tp plan with local_colwise and local_rowwise for the attention because fbgemm operations cannot be parallelized
             # With local_colwise and local_rowwise, all the operations are done locally, and we add a gather operation to gather the results instead of
             # using dtensors
-            "layers.*.self_attn.q_proj.weight": "
-            "layers.*.self_attn.q_proj.weight_scale": "
-            "layers.*.self_attn.k_proj.weight": "
-            "layers.*.self_attn.k_proj.weight_scale": "
-            "layers.*.self_attn.v_proj.weight": "
-            "layers.*.self_attn.v_proj.weight_scale": "
-            "layers.*.self_attn.o_proj.weight": "
-            "layers.*.self_attn": "gather",
+            "layers.*.self_attn.q_proj.weight": "colwise",
+            "layers.*.self_attn.q_proj.weight_scale": "colwise",
+            "layers.*.self_attn.k_proj.weight": "colwise",
+            "layers.*.self_attn.k_proj.weight_scale": "colwise",
+            "layers.*.self_attn.v_proj.weight": "colwise",
+            "layers.*.self_attn.v_proj.weight_scale": "colwise",
+            "layers.*.self_attn.o_proj.weight": "rowwise",
             # We keep the same sequence_parallel plan for layernorms
             "layers.*.input_layernorm.weight": "sequence_parallel",
             "layers.*.post_attention_layernorm.weight": "sequence_parallel",
@@ -148,23 +147,21 @@ class FbgemmFp8HfQuantizer(HfQuantizer):
             # We keep the same local_colwise and local_rowwise plan for the feed forward shared expert
             # We also add scales for the shared expert, for local_colwise the scale is also local_colwise
             # For local_rowwise the scale is replicated, so we don't need to add it
-            "layers.*.feed_forward.shared_expert.gate_proj.weight": "
-            "layers.*.feed_forward.shared_expert.gate_proj.weight_scale": "
-            "layers.*.feed_forward.shared_expert.up_proj.weight": "
-            "layers.*.feed_forward.shared_expert.up_proj.weight_scale": "
-            "layers.*.feed_forward.shared_expert.down_proj.weight": "
-            "layers.*.feed_forward.experts": "
-            "layers.*.feed_forward": "
-            "layers.*.feed_forward.experts.*.
-            "layers.*.feed_forward.experts.*.
-            "layers.*.feed_forward.experts.*.
-            "layers.*.feed_forward.experts.*.up_proj.weight_scale": "local_colwise",
-            "layers.*.feed_forward.experts.*.down_proj.weight": "local_rowwise",
+            "layers.*.feed_forward.shared_expert.gate_proj.weight": "colwise",
+            "layers.*.feed_forward.shared_expert.gate_proj.weight_scale": "colwise",
+            "layers.*.feed_forward.shared_expert.up_proj.weight": "colwise",
+            "layers.*.feed_forward.shared_expert.up_proj.weight_scale": "colwise",
+            "layers.*.feed_forward.shared_expert.down_proj.weight": "rowwise",
+            "layers.*.feed_forward.experts.*.gate_proj.weight": "colwise",
+            "layers.*.feed_forward.experts.*.gate_proj.weight_scale": "colwise",
+            "layers.*.feed_forward.experts.*.up_proj.weight": "colwise",
+            "layers.*.feed_forward.experts.*.up_proj.weight_scale": "colwise",
+            "layers.*.feed_forward.experts.*.down_proj.weight": "rowwise",
             # For Fused implementation we use local_packed_rowwise for the gate_up_proj, and the same for the packed scales
             # We use local_colwise for the down_proj, and the scales are replicated so we don't add them
-            "layers.*.feed_forward.experts.gate_up_proj": "
-            "layers.*.feed_forward.experts.gate_up_proj_scale": "
-            "layers.*.feed_forward.experts.down_proj": "
+            "layers.*.feed_forward.experts.gate_up_proj": "packed_rowwise",
+            "layers.*.feed_forward.experts.gate_up_proj_scale": "packed_rowwise",
+            "layers.*.feed_forward.experts.down_proj": "colwise",
         }
         if config.get_text_config() is not None:
             config.get_text_config().base_model_tp_plan = text_plan
@@ -110,29 +110,23 @@ class FineGrainedFP8HfQuantizer(HfQuantizer):
             pre_quantized=self.pre_quantized,
         )

-    # NOTE: TP is applied before quantization so this is only to add hooks.
-    # Quantization is incompatible with DTensors, so we have to anyway have
-    # gathers! But it should be model independent -> figure out where to put
-    # the gather and that's it.
     def update_tp_plan(self, config):
         if "Qwen3" in config.__class__.__name__:
             text_plan = {
-                "layers.*.self_attn.q_proj.weight": "
-                "layers.*.self_attn.q_proj.weight_scale_inv": "
-                "layers.*.self_attn.k_proj.weight": "
-                "layers.*.self_attn.k_proj.weight_scale_inv": "
-                "layers.*.self_attn.v_proj.weight": "
-                "layers.*.self_attn.v_proj.weight_scale_inv": "
-                "layers.*.self_attn.o_proj.weight": "
-                "layers.*.self_attn.o_proj.weight_scale_inv": "
-                "layers.*.
-                "layers.*.mlp.gate_proj.
-                "layers.*.mlp.
-                "layers.*.mlp.up_proj.
-                "layers.*.mlp.
-                "layers.*.mlp.down_proj.
-                "layers.*.mlp.down_proj.weight_scale_inv": "local_rowwise",
-                "layers.*.mlp": "gather",
+                "layers.*.self_attn.q_proj.weight": "colwise",
+                "layers.*.self_attn.q_proj.weight_scale_inv": "colwise",
+                "layers.*.self_attn.k_proj.weight": "colwise",
+                "layers.*.self_attn.k_proj.weight_scale_inv": "colwise",
+                "layers.*.self_attn.v_proj.weight": "colwise",
+                "layers.*.self_attn.v_proj.weight_scale_inv": "colwise",
+                "layers.*.self_attn.o_proj.weight": "rowwise",
+                "layers.*.self_attn.o_proj.weight_scale_inv": "rowwise",
+                "layers.*.mlp.gate_proj.weight": "colwise",
+                "layers.*.mlp.gate_proj.weight_scale_inv": "colwise",
+                "layers.*.mlp.up_proj.weight": "colwise",
+                "layers.*.mlp.up_proj.weight_scale_inv": "colwise",
+                "layers.*.mlp.down_proj.weight": "rowwise",
+                "layers.*.mlp.down_proj.weight_scale_inv": "rowwise",
             }

             config.base_model_tp_plan = text_plan
@@ -55,7 +55,7 @@ class Mxfp4HfQuantizer(HfQuantizer):
         try:
             from ..integrations.hub_kernels import get_kernel

-            self.triton_kernels_hub = get_kernel("kernels-community/
+            self.triton_kernels_hub = get_kernel("kernels-community/gpt-oss-triton-kernels")
         except ImportError:
             raise ImportError("kernels package is required for MXFP4 quantization")
         return self.triton_kernels_hub
@@ -181,9 +181,6 @@ class TorchAoHfQuantizer(HfQuantizer):
         self.set_metadata(checkpoint_files)

     def param_needs_quantization(self, model: "PreTrainedModel", param_name: str, **kwargs) -> bool:
-        if self.quantization_config.quant_type == "autoquant":
-            return False
-
         # check if the param_name is not in self.modules_to_not_convert
         if not should_convert_module(param_name, self.modules_to_not_convert):
             return False
@@ -213,19 +210,6 @@
         return isinstance(module, tuple(_QUANTIZABLE)) and tensor_name == "weight"

     def _process_model_after_weight_loading(self, model, **kwargs):
-        """No process required for torchao quantized model"""
-        if self.quantization_config.quant_type == "autoquant":
-            from torchao import autoquant
-            from torchao.quantization import ALL_AUTOQUANT_CLASS_LIST
-
-            model = torch.compile(model, mode="max-autotune")
-            model = autoquant(
-                model,
-                qtensor_class_list=ALL_AUTOQUANT_CLASS_LIST,
-                set_inductor_config=False,
-                **self.quantization_config.quant_type_kwargs,
-            )
-            return model
         return

     def is_serializable(self) -> bool:
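With the autoquant branch gone, _process_model_after_weight_loading is a no-op; applying torchao's autoquant is presumably left to the caller now. A sketch of what the removed branch did, recast as user code (assumes torchao is installed and `model` is an already-loaded torch.nn.Module):

import torch
from torchao import autoquant
from torchao.quantization import ALL_AUTOQUANT_CLASS_LIST

def apply_autoquant(model: torch.nn.Module) -> torch.nn.Module:
    # Mirrors the deleted quantizer branch: compile first, then autoquant.
    model = torch.compile(model, mode="max-autotune")
    return autoquant(model, qtensor_class_list=ALL_AUTOQUANT_CLASS_LIST, set_inductor_config=False)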