transformers-5.0.0rc2-py3-none-any.whl → transformers-5.0.0rc3-py3-none-any.whl
This diff covers the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
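For anyone who wants to reproduce a per-file summary like the one below, here is a minimal sketch using only the Python standard library. This is not the diff service's actual tooling, and the local wheel filenames are assumptions (download them first, e.g. with `pip download transformers==5.0.0rc3 --no-deps`):

```python
# Minimal sketch (not the diff service's tooling): per-file "+added -removed"
# counts between two locally downloaded wheels. The wheel paths are assumptions.
import difflib
import zipfile

OLD = "transformers-5.0.0rc2-py3-none-any.whl"  # assumed local path
NEW = "transformers-5.0.0rc3-py3-none-any.whl"  # assumed local path

def read_members(path):
    # Map each .py member of the wheel to its decoded lines.
    with zipfile.ZipFile(path) as zf:
        return {
            name: zf.read(name).decode("utf-8", errors="replace").splitlines()
            for name in zf.namelist()
            if name.endswith(".py")
        }

old, new = read_members(OLD), read_members(NEW)
for name in sorted(set(old) | set(new)):
    added = removed = 0
    for line in difflib.unified_diff(old.get(name, []), new.get(name, []), lineterm=""):
        # Skip the "---"/"+++" file headers; count hunk lines only.
        if line.startswith("+") and not line.startswith("+++"):
            added += 1
        elif line.startswith("-") and not line.startswith("---"):
            removed += 1
    if added or removed:
        print(f"{name} +{added} -{removed}")
```

Counts from `difflib.unified_diff` may differ slightly from the service's numbers if it uses a different diff algorithm, but the per-file shape is the same.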
- transformers/__init__.py +9 -28
- transformers/audio_utils.py +32 -32
- transformers/cache_utils.py +15 -124
- transformers/cli/chat.py +3 -3
- transformers/cli/serve.py +2 -2
- transformers/cli/transformers.py +2 -1
- transformers/configuration_utils.py +31 -33
- transformers/conversion_mapping.py +5 -1
- transformers/convert_slow_tokenizer.py +3 -8
- transformers/core_model_loading.py +14 -15
- transformers/data/processors/glue.py +0 -1
- transformers/data/processors/utils.py +0 -1
- transformers/data/processors/xnli.py +0 -1
- transformers/dependency_versions_table.py +4 -4
- transformers/distributed/configuration_utils.py +1 -2
- transformers/dynamic_module_utils.py +23 -23
- transformers/feature_extraction_sequence_utils.py +19 -23
- transformers/feature_extraction_utils.py +14 -14
- transformers/generation/candidate_generator.py +1 -2
- transformers/generation/configuration_utils.py +54 -39
- transformers/generation/continuous_batching/__init__.py +0 -1
- transformers/generation/continuous_batching/cache.py +34 -6
- transformers/generation/continuous_batching/cache_manager.py +25 -12
- transformers/generation/continuous_batching/continuous_api.py +54 -23
- transformers/generation/continuous_batching/requests.py +25 -4
- transformers/generation/continuous_batching/scheduler.py +117 -49
- transformers/generation/logits_process.py +0 -128
- transformers/generation/streamers.py +0 -1
- transformers/generation/utils.py +16 -26
- transformers/generation/watermarking.py +2 -3
- transformers/hf_argparser.py +9 -13
- transformers/hyperparameter_search.py +1 -2
- transformers/image_processing_base.py +9 -9
- transformers/image_processing_utils.py +11 -12
- transformers/image_processing_utils_fast.py +53 -53
- transformers/image_transforms.py +29 -29
- transformers/image_utils.py +30 -32
- transformers/integrations/awq.py +1 -3
- transformers/integrations/deepspeed.py +1 -1
- transformers/integrations/eetq.py +0 -1
- transformers/integrations/fbgemm_fp8.py +1 -2
- transformers/integrations/finegrained_fp8.py +8 -7
- transformers/integrations/flash_attention.py +1 -1
- transformers/integrations/flex_attention.py +1 -1
- transformers/integrations/fp_quant.py +4 -6
- transformers/integrations/ggml.py +0 -1
- transformers/integrations/integration_utils.py +2 -3
- transformers/integrations/mxfp4.py +5 -6
- transformers/integrations/quark.py +2 -4
- transformers/integrations/torchao.py +4 -6
- transformers/loss/loss_lw_detr.py +356 -0
- transformers/loss/loss_utils.py +2 -0
- transformers/masking_utils.py +47 -51
- transformers/model_debugging_utils.py +4 -5
- transformers/modelcard.py +14 -192
- transformers/modeling_attn_mask_utils.py +19 -19
- transformers/modeling_flash_attention_utils.py +27 -27
- transformers/modeling_gguf_pytorch_utils.py +5 -5
- transformers/modeling_layers.py +21 -22
- transformers/modeling_outputs.py +242 -253
- transformers/modeling_rope_utils.py +32 -32
- transformers/modeling_utils.py +67 -90
- transformers/models/__init__.py +4 -0
- transformers/models/afmoe/configuration_afmoe.py +26 -29
- transformers/models/afmoe/modeling_afmoe.py +30 -33
- transformers/models/afmoe/modular_afmoe.py +16 -18
- transformers/models/aimv2/configuration_aimv2.py +2 -5
- transformers/models/aimv2/modeling_aimv2.py +20 -21
- transformers/models/aimv2/modular_aimv2.py +7 -9
- transformers/models/albert/configuration_albert.py +0 -1
- transformers/models/albert/modeling_albert.py +67 -69
- transformers/models/albert/tokenization_albert.py +1 -4
- transformers/models/align/configuration_align.py +0 -1
- transformers/models/align/modeling_align.py +61 -62
- transformers/models/align/processing_align.py +2 -30
- transformers/models/altclip/configuration_altclip.py +0 -1
- transformers/models/altclip/modeling_altclip.py +76 -77
- transformers/models/altclip/processing_altclip.py +2 -15
- transformers/models/apertus/__init__.py +0 -1
- transformers/models/apertus/configuration_apertus.py +18 -21
- transformers/models/apertus/modeling_apertus.py +31 -34
- transformers/models/apertus/modular_apertus.py +28 -30
- transformers/models/arcee/configuration_arcee.py +20 -23
- transformers/models/arcee/modeling_arcee.py +31 -34
- transformers/models/arcee/modular_arcee.py +20 -23
- transformers/models/aria/configuration_aria.py +20 -23
- transformers/models/aria/image_processing_aria.py +25 -27
- transformers/models/aria/modeling_aria.py +63 -66
- transformers/models/aria/modular_aria.py +78 -85
- transformers/models/aria/processing_aria.py +28 -35
- transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +0 -1
- transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py +3 -6
- transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +6 -8
- transformers/models/audioflamingo3/__init__.py +0 -1
- transformers/models/audioflamingo3/configuration_audioflamingo3.py +0 -1
- transformers/models/audioflamingo3/modeling_audioflamingo3.py +22 -23
- transformers/models/audioflamingo3/modular_audioflamingo3.py +12 -17
- transformers/models/audioflamingo3/processing_audioflamingo3.py +6 -8
- transformers/models/auto/auto_factory.py +4 -5
- transformers/models/auto/configuration_auto.py +26 -5
- transformers/models/auto/feature_extraction_auto.py +5 -7
- transformers/models/auto/image_processing_auto.py +13 -26
- transformers/models/auto/modeling_auto.py +18 -199
- transformers/models/auto/processing_auto.py +2 -1
- transformers/models/auto/tokenization_auto.py +21 -22
- transformers/models/auto/video_processing_auto.py +7 -8
- transformers/models/autoformer/configuration_autoformer.py +4 -7
- transformers/models/autoformer/modeling_autoformer.py +98 -100
- transformers/models/aya_vision/configuration_aya_vision.py +0 -1
- transformers/models/aya_vision/modeling_aya_vision.py +35 -37
- transformers/models/aya_vision/modular_aya_vision.py +26 -29
- transformers/models/aya_vision/processing_aya_vision.py +25 -53
- transformers/models/bamba/configuration_bamba.py +29 -32
- transformers/models/bamba/modeling_bamba.py +60 -64
- transformers/models/bamba/modular_bamba.py +51 -55
- transformers/models/bark/configuration_bark.py +4 -7
- transformers/models/bark/generation_configuration_bark.py +3 -5
- transformers/models/bark/modeling_bark.py +40 -55
- transformers/models/bark/processing_bark.py +19 -41
- transformers/models/bart/configuration_bart.py +0 -1
- transformers/models/bart/modeling_bart.py +115 -117
- transformers/models/barthez/tokenization_barthez.py +1 -4
- transformers/models/bartpho/tokenization_bartpho.py +6 -7
- transformers/models/beit/configuration_beit.py +0 -11
- transformers/models/beit/image_processing_beit.py +53 -56
- transformers/models/beit/image_processing_beit_fast.py +8 -9
- transformers/models/beit/modeling_beit.py +51 -53
- transformers/models/bert/configuration_bert.py +0 -1
- transformers/models/bert/modeling_bert.py +111 -122
- transformers/models/bert/tokenization_bert.py +2 -4
- transformers/models/bert/tokenization_bert_legacy.py +3 -5
- transformers/models/bert_generation/configuration_bert_generation.py +0 -1
- transformers/models/bert_generation/modeling_bert_generation.py +47 -49
- transformers/models/bert_generation/tokenization_bert_generation.py +2 -3
- transformers/models/bert_japanese/tokenization_bert_japanese.py +5 -6
- transformers/models/bertweet/tokenization_bertweet.py +1 -3
- transformers/models/big_bird/configuration_big_bird.py +0 -1
- transformers/models/big_bird/modeling_big_bird.py +107 -109
- transformers/models/big_bird/tokenization_big_bird.py +1 -4
- transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +0 -1
- transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +109 -111
- transformers/models/biogpt/configuration_biogpt.py +0 -1
- transformers/models/biogpt/modeling_biogpt.py +69 -71
- transformers/models/biogpt/modular_biogpt.py +59 -61
- transformers/models/biogpt/tokenization_biogpt.py +3 -5
- transformers/models/bit/configuration_bit.py +0 -1
- transformers/models/bit/image_processing_bit.py +21 -24
- transformers/models/bit/image_processing_bit_fast.py +0 -1
- transformers/models/bit/modeling_bit.py +9 -11
- transformers/models/bitnet/configuration_bitnet.py +18 -21
- transformers/models/bitnet/modeling_bitnet.py +31 -34
- transformers/models/bitnet/modular_bitnet.py +4 -6
- transformers/models/blenderbot/configuration_blenderbot.py +0 -1
- transformers/models/blenderbot/modeling_blenderbot.py +64 -95
- transformers/models/blenderbot/tokenization_blenderbot.py +0 -1
- transformers/models/blenderbot_small/configuration_blenderbot_small.py +0 -1
- transformers/models/blenderbot_small/modeling_blenderbot_small.py +66 -68
- transformers/models/blenderbot_small/tokenization_blenderbot_small.py +1 -3
- transformers/models/blip/configuration_blip.py +0 -1
- transformers/models/blip/image_processing_blip.py +17 -20
- transformers/models/blip/image_processing_blip_fast.py +0 -1
- transformers/models/blip/modeling_blip.py +60 -71
- transformers/models/blip/modeling_blip_text.py +63 -65
- transformers/models/blip/processing_blip.py +5 -36
- transformers/models/blip_2/configuration_blip_2.py +0 -1
- transformers/models/blip_2/modeling_blip_2.py +70 -71
- transformers/models/blip_2/processing_blip_2.py +8 -38
- transformers/models/bloom/configuration_bloom.py +0 -1
- transformers/models/bloom/modeling_bloom.py +58 -59
- transformers/models/blt/configuration_blt.py +71 -74
- transformers/models/blt/modeling_blt.py +73 -76
- transformers/models/blt/modular_blt.py +57 -59
- transformers/models/bridgetower/configuration_bridgetower.py +0 -1
- transformers/models/bridgetower/image_processing_bridgetower.py +34 -35
- transformers/models/bridgetower/image_processing_bridgetower_fast.py +7 -8
- transformers/models/bridgetower/modeling_bridgetower.py +107 -109
- transformers/models/bridgetower/processing_bridgetower.py +2 -16
- transformers/models/bros/configuration_bros.py +0 -1
- transformers/models/bros/modeling_bros.py +78 -80
- transformers/models/bros/processing_bros.py +2 -12
- transformers/models/byt5/tokenization_byt5.py +4 -6
- transformers/models/camembert/configuration_camembert.py +0 -1
- transformers/models/camembert/modeling_camembert.py +91 -93
- transformers/models/camembert/modular_camembert.py +51 -54
- transformers/models/camembert/tokenization_camembert.py +1 -4
- transformers/models/canine/configuration_canine.py +0 -1
- transformers/models/canine/modeling_canine.py +73 -75
- transformers/models/canine/tokenization_canine.py +0 -1
- transformers/models/chameleon/configuration_chameleon.py +24 -27
- transformers/models/chameleon/image_processing_chameleon.py +21 -24
- transformers/models/chameleon/image_processing_chameleon_fast.py +0 -1
- transformers/models/chameleon/modeling_chameleon.py +53 -56
- transformers/models/chameleon/processing_chameleon.py +16 -41
- transformers/models/chinese_clip/configuration_chinese_clip.py +0 -1
- transformers/models/chinese_clip/image_processing_chinese_clip.py +21 -24
- transformers/models/chinese_clip/image_processing_chinese_clip_fast.py +0 -1
- transformers/models/chinese_clip/modeling_chinese_clip.py +65 -66
- transformers/models/chinese_clip/processing_chinese_clip.py +2 -15
- transformers/models/clap/configuration_clap.py +0 -1
- transformers/models/clap/feature_extraction_clap.py +9 -10
- transformers/models/clap/modeling_clap.py +88 -89
- transformers/models/clap/processing_clap.py +2 -15
- transformers/models/clip/configuration_clip.py +0 -1
- transformers/models/clip/image_processing_clip.py +21 -24
- transformers/models/clip/image_processing_clip_fast.py +0 -1
- transformers/models/clip/modeling_clip.py +45 -46
- transformers/models/clip/processing_clip.py +2 -14
- transformers/models/clip/tokenization_clip.py +2 -5
- transformers/models/clipseg/configuration_clipseg.py +0 -1
- transformers/models/clipseg/modeling_clipseg.py +86 -87
- transformers/models/clipseg/processing_clipseg.py +8 -39
- transformers/models/clvp/configuration_clvp.py +1 -3
- transformers/models/clvp/feature_extraction_clvp.py +7 -10
- transformers/models/clvp/modeling_clvp.py +119 -115
- transformers/models/clvp/number_normalizer.py +1 -2
- transformers/models/clvp/processing_clvp.py +3 -20
- transformers/models/clvp/tokenization_clvp.py +0 -1
- transformers/models/code_llama/tokenization_code_llama.py +3 -6
- transformers/models/codegen/configuration_codegen.py +0 -1
- transformers/models/codegen/modeling_codegen.py +48 -48
- transformers/models/codegen/tokenization_codegen.py +5 -6
- transformers/models/cohere/configuration_cohere.py +20 -23
- transformers/models/cohere/modeling_cohere.py +35 -38
- transformers/models/cohere/modular_cohere.py +24 -28
- transformers/models/cohere/tokenization_cohere.py +5 -6
- transformers/models/cohere2/configuration_cohere2.py +21 -24
- transformers/models/cohere2/modeling_cohere2.py +34 -37
- transformers/models/cohere2/modular_cohere2.py +39 -41
- transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +6 -7
- transformers/models/cohere2_vision/modeling_cohere2_vision.py +28 -30
- transformers/models/cohere2_vision/modular_cohere2_vision.py +21 -23
- transformers/models/cohere2_vision/processing_cohere2_vision.py +6 -36
- transformers/models/colpali/configuration_colpali.py +0 -1
- transformers/models/colpali/modeling_colpali.py +14 -16
- transformers/models/colpali/modular_colpali.py +11 -51
- transformers/models/colpali/processing_colpali.py +14 -52
- transformers/models/colqwen2/modeling_colqwen2.py +20 -22
- transformers/models/colqwen2/modular_colqwen2.py +29 -68
- transformers/models/colqwen2/processing_colqwen2.py +16 -52
- transformers/models/conditional_detr/configuration_conditional_detr.py +0 -1
- transformers/models/conditional_detr/image_processing_conditional_detr.py +64 -66
- transformers/models/conditional_detr/image_processing_conditional_detr_fast.py +22 -22
- transformers/models/conditional_detr/modeling_conditional_detr.py +78 -80
- transformers/models/conditional_detr/modular_conditional_detr.py +1 -3
- transformers/models/convbert/configuration_convbert.py +0 -1
- transformers/models/convbert/modeling_convbert.py +85 -87
- transformers/models/convbert/tokenization_convbert.py +0 -1
- transformers/models/convnext/configuration_convnext.py +0 -1
- transformers/models/convnext/image_processing_convnext.py +18 -21
- transformers/models/convnext/image_processing_convnext_fast.py +5 -6
- transformers/models/convnext/modeling_convnext.py +5 -8
- transformers/models/convnextv2/configuration_convnextv2.py +0 -1
- transformers/models/convnextv2/modeling_convnextv2.py +5 -8
- transformers/models/cpm/tokenization_cpm.py +6 -7
- transformers/models/cpm/tokenization_cpm_fast.py +3 -5
- transformers/models/cpmant/configuration_cpmant.py +0 -1
- transformers/models/cpmant/modeling_cpmant.py +38 -40
- transformers/models/cpmant/tokenization_cpmant.py +1 -3
- transformers/models/csm/configuration_csm.py +49 -51
- transformers/models/csm/generation_csm.py +13 -14
- transformers/models/csm/modeling_csm.py +78 -81
- transformers/models/csm/modular_csm.py +56 -58
- transformers/models/csm/processing_csm.py +25 -68
- transformers/models/ctrl/configuration_ctrl.py +0 -1
- transformers/models/ctrl/modeling_ctrl.py +38 -41
- transformers/models/ctrl/tokenization_ctrl.py +0 -1
- transformers/models/cvt/configuration_cvt.py +0 -1
- transformers/models/cvt/modeling_cvt.py +13 -15
- transformers/models/cwm/__init__.py +0 -1
- transformers/models/cwm/configuration_cwm.py +3 -5
- transformers/models/cwm/modeling_cwm.py +32 -34
- transformers/models/cwm/modular_cwm.py +10 -12
- transformers/models/d_fine/configuration_d_fine.py +0 -1
- transformers/models/d_fine/modeling_d_fine.py +81 -82
- transformers/models/d_fine/modular_d_fine.py +8 -9
- transformers/models/dab_detr/configuration_dab_detr.py +0 -1
- transformers/models/dab_detr/modeling_dab_detr.py +68 -70
- transformers/models/dac/configuration_dac.py +0 -1
- transformers/models/dac/feature_extraction_dac.py +6 -9
- transformers/models/dac/modeling_dac.py +21 -23
- transformers/models/data2vec/configuration_data2vec_audio.py +0 -1
- transformers/models/data2vec/configuration_data2vec_text.py +0 -1
- transformers/models/data2vec/configuration_data2vec_vision.py +0 -1
- transformers/models/data2vec/modeling_data2vec_audio.py +52 -56
- transformers/models/data2vec/modeling_data2vec_text.py +91 -93
- transformers/models/data2vec/modeling_data2vec_vision.py +41 -42
- transformers/models/data2vec/modular_data2vec_audio.py +6 -1
- transformers/models/data2vec/modular_data2vec_text.py +51 -54
- transformers/models/dbrx/configuration_dbrx.py +18 -19
- transformers/models/dbrx/modeling_dbrx.py +39 -42
- transformers/models/dbrx/modular_dbrx.py +31 -33
- transformers/models/deberta/configuration_deberta.py +0 -1
- transformers/models/deberta/modeling_deberta.py +57 -60
- transformers/models/deberta/tokenization_deberta.py +2 -5
- transformers/models/deberta_v2/configuration_deberta_v2.py +0 -1
- transformers/models/deberta_v2/modeling_deberta_v2.py +63 -65
- transformers/models/deberta_v2/tokenization_deberta_v2.py +1 -4
- transformers/models/decision_transformer/configuration_decision_transformer.py +0 -1
- transformers/models/decision_transformer/modeling_decision_transformer.py +48 -50
- transformers/models/deepseek_v2/configuration_deepseek_v2.py +34 -37
- transformers/models/deepseek_v2/modeling_deepseek_v2.py +32 -33
- transformers/models/deepseek_v2/modular_deepseek_v2.py +40 -42
- transformers/models/deepseek_v3/configuration_deepseek_v3.py +35 -38
- transformers/models/deepseek_v3/modeling_deepseek_v3.py +31 -33
- transformers/models/deepseek_v3/modular_deepseek_v3.py +4 -5
- transformers/models/deepseek_vl/configuration_deepseek_vl.py +2 -3
- transformers/models/deepseek_vl/image_processing_deepseek_vl.py +25 -26
- transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py +7 -6
- transformers/models/deepseek_vl/modeling_deepseek_vl.py +31 -31
- transformers/models/deepseek_vl/modular_deepseek_vl.py +11 -43
- transformers/models/deepseek_vl/processing_deepseek_vl.py +10 -41
- transformers/models/deepseek_vl_hybrid/configuration_deepseek_vl_hybrid.py +3 -5
- transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py +35 -35
- transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py +16 -16
- transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py +33 -33
- transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py +71 -90
- transformers/models/deepseek_vl_hybrid/processing_deepseek_vl_hybrid.py +12 -44
- transformers/models/deformable_detr/configuration_deformable_detr.py +0 -1
- transformers/models/deformable_detr/image_processing_deformable_detr.py +59 -61
- transformers/models/deformable_detr/image_processing_deformable_detr_fast.py +17 -17
- transformers/models/deformable_detr/modeling_deformable_detr.py +66 -67
- transformers/models/deformable_detr/modular_deformable_detr.py +1 -3
- transformers/models/deit/configuration_deit.py +0 -1
- transformers/models/deit/image_processing_deit.py +18 -21
- transformers/models/deit/image_processing_deit_fast.py +0 -1
- transformers/models/deit/modeling_deit.py +16 -18
- transformers/models/depth_anything/configuration_depth_anything.py +0 -1
- transformers/models/depth_anything/modeling_depth_anything.py +5 -8
- transformers/models/depth_pro/configuration_depth_pro.py +0 -1
- transformers/models/depth_pro/image_processing_depth_pro.py +22 -23
- transformers/models/depth_pro/image_processing_depth_pro_fast.py +6 -7
- transformers/models/depth_pro/modeling_depth_pro.py +21 -23
- transformers/models/detr/configuration_detr.py +0 -1
- transformers/models/detr/image_processing_detr.py +64 -66
- transformers/models/detr/image_processing_detr_fast.py +22 -23
- transformers/models/detr/modeling_detr.py +70 -72
- transformers/models/dia/configuration_dia.py +5 -8
- transformers/models/dia/feature_extraction_dia.py +6 -9
- transformers/models/dia/generation_dia.py +40 -36
- transformers/models/dia/modeling_dia.py +61 -64
- transformers/models/dia/modular_dia.py +52 -54
- transformers/models/dia/processing_dia.py +39 -29
- transformers/models/dia/tokenization_dia.py +3 -6
- transformers/models/diffllama/configuration_diffllama.py +20 -23
- transformers/models/diffllama/modeling_diffllama.py +42 -45
- transformers/models/diffllama/modular_diffllama.py +16 -18
- transformers/models/dinat/configuration_dinat.py +0 -1
- transformers/models/dinat/modeling_dinat.py +40 -42
- transformers/models/dinov2/configuration_dinov2.py +0 -1
- transformers/models/dinov2/modeling_dinov2.py +11 -13
- transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py +1 -1
- transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py +12 -13
- transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py +5 -7
- transformers/models/dinov3_convnext/configuration_dinov3_convnext.py +4 -7
- transformers/models/dinov3_convnext/modeling_dinov3_convnext.py +3 -6
- transformers/models/dinov3_vit/configuration_dinov3_vit.py +5 -8
- transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py +5 -6
- transformers/models/dinov3_vit/modeling_dinov3_vit.py +14 -16
- transformers/models/dinov3_vit/modular_dinov3_vit.py +11 -13
- transformers/models/distilbert/configuration_distilbert.py +0 -1
- transformers/models/distilbert/modeling_distilbert.py +44 -46
- transformers/models/distilbert/tokenization_distilbert.py +0 -1
- transformers/models/doge/__init__.py +0 -1
- transformers/models/doge/configuration_doge.py +25 -28
- transformers/models/doge/modeling_doge.py +42 -45
- transformers/models/doge/modular_doge.py +57 -58
- transformers/models/donut/configuration_donut_swin.py +0 -1
- transformers/models/donut/image_processing_donut.py +26 -29
- transformers/models/donut/image_processing_donut_fast.py +5 -10
- transformers/models/donut/modeling_donut_swin.py +44 -46
- transformers/models/donut/processing_donut.py +5 -26
- transformers/models/dots1/configuration_dots1.py +27 -29
- transformers/models/dots1/modeling_dots1.py +31 -34
- transformers/models/dots1/modular_dots1.py +0 -1
- transformers/models/dpr/configuration_dpr.py +0 -1
- transformers/models/dpr/modeling_dpr.py +37 -39
- transformers/models/dpr/tokenization_dpr.py +7 -9
- transformers/models/dpr/tokenization_dpr_fast.py +7 -9
- transformers/models/dpt/configuration_dpt.py +0 -1
- transformers/models/dpt/image_processing_dpt.py +65 -66
- transformers/models/dpt/image_processing_dpt_fast.py +13 -14
- transformers/models/dpt/modeling_dpt.py +19 -21
- transformers/models/dpt/modular_dpt.py +10 -11
- transformers/models/edgetam/configuration_edgetam.py +0 -1
- transformers/models/edgetam/modeling_edgetam.py +39 -41
- transformers/models/edgetam/modular_edgetam.py +2 -6
- transformers/models/edgetam_video/__init__.py +0 -1
- transformers/models/edgetam_video/configuration_edgetam_video.py +0 -1
- transformers/models/edgetam_video/modeling_edgetam_video.py +76 -77
- transformers/models/edgetam_video/modular_edgetam_video.py +16 -18
- transformers/models/efficientloftr/configuration_efficientloftr.py +4 -5
- transformers/models/efficientloftr/image_processing_efficientloftr.py +14 -16
- transformers/models/efficientloftr/image_processing_efficientloftr_fast.py +4 -4
- transformers/models/efficientloftr/modeling_efficientloftr.py +27 -29
- transformers/models/efficientloftr/modular_efficientloftr.py +1 -3
- transformers/models/efficientnet/configuration_efficientnet.py +0 -1
- transformers/models/efficientnet/image_processing_efficientnet.py +23 -26
- transformers/models/efficientnet/image_processing_efficientnet_fast.py +14 -15
- transformers/models/efficientnet/modeling_efficientnet.py +12 -14
- transformers/models/electra/configuration_electra.py +0 -1
- transformers/models/electra/modeling_electra.py +101 -103
- transformers/models/emu3/configuration_emu3.py +5 -7
- transformers/models/emu3/image_processing_emu3.py +44 -39
- transformers/models/emu3/modeling_emu3.py +59 -62
- transformers/models/emu3/modular_emu3.py +32 -34
- transformers/models/emu3/processing_emu3.py +18 -43
- transformers/models/encodec/configuration_encodec.py +2 -4
- transformers/models/encodec/feature_extraction_encodec.py +10 -13
- transformers/models/encodec/modeling_encodec.py +25 -29
- transformers/models/encoder_decoder/configuration_encoder_decoder.py +0 -1
- transformers/models/encoder_decoder/modeling_encoder_decoder.py +17 -19
- transformers/models/eomt/configuration_eomt.py +0 -1
- transformers/models/eomt/image_processing_eomt.py +53 -55
- transformers/models/eomt/image_processing_eomt_fast.py +15 -16
- transformers/models/eomt/modeling_eomt.py +16 -18
- transformers/models/eomt/modular_eomt.py +11 -13
- transformers/models/ernie/configuration_ernie.py +0 -1
- transformers/models/ernie/modeling_ernie.py +121 -132
- transformers/models/ernie/modular_ernie.py +91 -103
- transformers/models/ernie4_5/configuration_ernie4_5.py +18 -20
- transformers/models/ernie4_5/modeling_ernie4_5.py +31 -33
- transformers/models/ernie4_5/modular_ernie4_5.py +1 -3
- transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py +27 -29
- transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +36 -38
- transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py +7 -9
- transformers/models/ernie4_5_vl_moe/configuration_ernie4_5_vl_moe.py +0 -1
- transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe.py +34 -35
- transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe_fast.py +6 -7
- transformers/models/ernie4_5_vl_moe/modeling_ernie4_5_vl_moe.py +84 -87
- transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py +86 -89
- transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py +3 -5
- transformers/models/ernie4_5_vl_moe/video_processing_ernie4_5_vl_moe.py +17 -18
- transformers/models/esm/configuration_esm.py +2 -4
- transformers/models/esm/modeling_esm.py +32 -34
- transformers/models/esm/modeling_esmfold.py +42 -44
- transformers/models/esm/openfold_utils/chunk_utils.py +6 -6
- transformers/models/esm/openfold_utils/loss.py +1 -2
- transformers/models/esm/openfold_utils/protein.py +13 -13
- transformers/models/esm/openfold_utils/tensor_utils.py +6 -6
- transformers/models/esm/tokenization_esm.py +2 -4
- transformers/models/evolla/configuration_evolla.py +29 -32
- transformers/models/evolla/modeling_evolla.py +58 -61
- transformers/models/evolla/modular_evolla.py +45 -47
- transformers/models/evolla/processing_evolla.py +23 -35
- transformers/models/exaone4/configuration_exaone4.py +19 -22
- transformers/models/exaone4/modeling_exaone4.py +32 -35
- transformers/models/exaone4/modular_exaone4.py +40 -42
- transformers/models/falcon/configuration_falcon.py +22 -25
- transformers/models/falcon/modeling_falcon.py +73 -76
- transformers/models/falcon_h1/configuration_falcon_h1.py +40 -43
- transformers/models/falcon_h1/modeling_falcon_h1.py +52 -55
- transformers/models/falcon_h1/modular_falcon_h1.py +47 -48
- transformers/models/falcon_mamba/configuration_falcon_mamba.py +0 -1
- transformers/models/falcon_mamba/modeling_falcon_mamba.py +46 -47
- transformers/models/falcon_mamba/modular_falcon_mamba.py +10 -13
- transformers/models/fast_vlm/configuration_fast_vlm.py +1 -0
- transformers/models/fast_vlm/modeling_fast_vlm.py +36 -36
- transformers/models/fast_vlm/modular_fast_vlm.py +2 -3
- transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +2 -5
- transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +45 -47
- transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +1 -3
- transformers/models/flaubert/configuration_flaubert.py +0 -1
- transformers/models/flaubert/modeling_flaubert.py +124 -128
- transformers/models/flaubert/tokenization_flaubert.py +3 -5
- transformers/models/flava/configuration_flava.py +5 -6
- transformers/models/flava/image_processing_flava.py +66 -67
- transformers/models/flava/image_processing_flava_fast.py +42 -43
- transformers/models/flava/modeling_flava.py +108 -107
- transformers/models/flava/processing_flava.py +2 -12
- transformers/models/flex_olmo/__init__.py +0 -1
- transformers/models/flex_olmo/configuration_flex_olmo.py +23 -25
- transformers/models/flex_olmo/modeling_flex_olmo.py +37 -39
- transformers/models/flex_olmo/modular_flex_olmo.py +35 -37
- transformers/models/florence2/configuration_florence2.py +0 -1
- transformers/models/florence2/modeling_florence2.py +39 -40
- transformers/models/florence2/modular_florence2.py +52 -81
- transformers/models/florence2/processing_florence2.py +18 -47
- transformers/models/fnet/configuration_fnet.py +0 -1
- transformers/models/fnet/modeling_fnet.py +69 -80
- transformers/models/fnet/tokenization_fnet.py +0 -1
- transformers/models/focalnet/configuration_focalnet.py +0 -1
- transformers/models/focalnet/modeling_focalnet.py +39 -41
- transformers/models/fsmt/configuration_fsmt.py +0 -1
- transformers/models/fsmt/modeling_fsmt.py +47 -48
- transformers/models/fsmt/tokenization_fsmt.py +3 -5
- transformers/models/funnel/configuration_funnel.py +0 -1
- transformers/models/funnel/modeling_funnel.py +91 -93
- transformers/models/funnel/tokenization_funnel.py +2 -5
- transformers/models/fuyu/configuration_fuyu.py +23 -26
- transformers/models/fuyu/image_processing_fuyu.py +29 -31
- transformers/models/fuyu/image_processing_fuyu_fast.py +12 -13
- transformers/models/fuyu/modeling_fuyu.py +26 -29
- transformers/models/fuyu/processing_fuyu.py +9 -36
- transformers/models/gemma/configuration_gemma.py +20 -23
- transformers/models/gemma/modeling_gemma.py +32 -34
- transformers/models/gemma/modular_gemma.py +28 -29
- transformers/models/gemma/tokenization_gemma.py +3 -6
- transformers/models/gemma2/configuration_gemma2.py +25 -28
- transformers/models/gemma2/modeling_gemma2.py +34 -37
- transformers/models/gemma2/modular_gemma2.py +55 -57
- transformers/models/gemma3/configuration_gemma3.py +28 -29
- transformers/models/gemma3/image_processing_gemma3.py +29 -31
- transformers/models/gemma3/image_processing_gemma3_fast.py +9 -10
- transformers/models/gemma3/modeling_gemma3.py +86 -89
- transformers/models/gemma3/modular_gemma3.py +85 -86
- transformers/models/gemma3/processing_gemma3.py +5 -5
- transformers/models/gemma3n/configuration_gemma3n.py +9 -10
- transformers/models/gemma3n/feature_extraction_gemma3n.py +9 -11
- transformers/models/gemma3n/modeling_gemma3n.py +80 -89
- transformers/models/gemma3n/modular_gemma3n.py +66 -75
- transformers/models/gemma3n/processing_gemma3n.py +12 -26
- transformers/models/git/configuration_git.py +0 -1
- transformers/models/git/modeling_git.py +84 -86
- transformers/models/git/processing_git.py +2 -14
- transformers/models/glm/configuration_glm.py +19 -21
- transformers/models/glm/modeling_glm.py +32 -35
- transformers/models/glm/modular_glm.py +4 -7
- transformers/models/glm4/configuration_glm4.py +19 -21
- transformers/models/glm4/modeling_glm4.py +35 -37
- transformers/models/glm4/modular_glm4.py +8 -10
- transformers/models/glm46v/configuration_glm46v.py +0 -1
- transformers/models/glm46v/image_processing_glm46v.py +35 -36
- transformers/models/glm46v/image_processing_glm46v_fast.py +7 -7
- transformers/models/glm46v/modeling_glm46v.py +51 -51
- transformers/models/glm46v/modular_glm46v.py +1 -3
- transformers/models/glm46v/processing_glm46v.py +7 -41
- transformers/models/glm46v/video_processing_glm46v.py +9 -11
- transformers/models/glm4_moe/configuration_glm4_moe.py +25 -28
- transformers/models/glm4_moe/modeling_glm4_moe.py +32 -35
- transformers/models/glm4_moe/modular_glm4_moe.py +26 -29
- transformers/models/glm4_moe_lite/__init__.py +28 -0
- transformers/models/glm4_moe_lite/configuration_glm4_moe_lite.py +235 -0
- transformers/models/glm4_moe_lite/modeling_glm4_moe_lite.py +740 -0
- transformers/models/glm4_moe_lite/modular_glm4_moe_lite.py +304 -0
- transformers/models/glm4v/configuration_glm4v.py +14 -17
- transformers/models/glm4v/image_processing_glm4v.py +34 -36
- transformers/models/glm4v/image_processing_glm4v_fast.py +6 -7
- transformers/models/glm4v/modeling_glm4v.py +133 -151
- transformers/models/glm4v/modular_glm4v.py +131 -182
- transformers/models/glm4v/processing_glm4v.py +7 -41
- transformers/models/glm4v/video_processing_glm4v.py +9 -11
- transformers/models/glm4v_moe/configuration_glm4v_moe.py +119 -122
- transformers/models/glm4v_moe/modeling_glm4v_moe.py +237 -297
- transformers/models/glm4v_moe/modular_glm4v_moe.py +54 -163
- transformers/models/glm_image/__init__.py +31 -0
- transformers/models/glm_image/configuration_glm_image.py +352 -0
- transformers/models/glm_image/image_processing_glm_image.py +503 -0
- transformers/models/glm_image/image_processing_glm_image_fast.py +296 -0
- transformers/models/glm_image/modeling_glm_image.py +1590 -0
- transformers/models/glm_image/modular_glm_image.py +1480 -0
- transformers/models/glm_image/processing_glm_image.py +217 -0
- transformers/models/glmasr/__init__.py +0 -1
- transformers/models/glmasr/configuration_glmasr.py +0 -1
- transformers/models/glmasr/modeling_glmasr.py +17 -18
- transformers/models/glmasr/modular_glmasr.py +16 -18
- transformers/models/glmasr/processing_glmasr.py +7 -8
- transformers/models/glpn/configuration_glpn.py +0 -1
- transformers/models/glpn/image_processing_glpn.py +11 -12
- transformers/models/glpn/image_processing_glpn_fast.py +8 -9
- transformers/models/glpn/modeling_glpn.py +10 -12
- transformers/models/got_ocr2/configuration_got_ocr2.py +5 -8
- transformers/models/got_ocr2/image_processing_got_ocr2.py +22 -24
- transformers/models/got_ocr2/image_processing_got_ocr2_fast.py +6 -7
- transformers/models/got_ocr2/modeling_got_ocr2.py +40 -42
- transformers/models/got_ocr2/modular_got_ocr2.py +31 -34
- transformers/models/got_ocr2/processing_got_ocr2.py +42 -63
- transformers/models/gpt2/configuration_gpt2.py +0 -1
- transformers/models/gpt2/modeling_gpt2.py +106 -108
- transformers/models/gpt2/tokenization_gpt2.py +6 -9
- transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +0 -1
- transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +73 -80
- transformers/models/gpt_neo/configuration_gpt_neo.py +0 -1
- transformers/models/gpt_neo/modeling_gpt_neo.py +63 -64
- transformers/models/gpt_neox/configuration_gpt_neox.py +19 -22
- transformers/models/gpt_neox/modeling_gpt_neox.py +70 -72
- transformers/models/gpt_neox/modular_gpt_neox.py +64 -66
- transformers/models/gpt_neox/tokenization_gpt_neox.py +2 -5
- transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +15 -18
- transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +41 -44
- transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py +1 -3
- transformers/models/gpt_oss/configuration_gpt_oss.py +21 -24
- transformers/models/gpt_oss/modeling_gpt_oss.py +34 -35
- transformers/models/gpt_oss/modular_gpt_oss.py +17 -19
- transformers/models/gpt_sw3/tokenization_gpt_sw3.py +4 -4
- transformers/models/gptj/configuration_gptj.py +0 -1
- transformers/models/gptj/modeling_gptj.py +82 -81
- transformers/models/granite/configuration_granite.py +23 -26
- transformers/models/granite/modeling_granite.py +39 -41
- transformers/models/granite/modular_granite.py +29 -31
- transformers/models/granite_speech/configuration_granite_speech.py +0 -1
- transformers/models/granite_speech/feature_extraction_granite_speech.py +1 -3
- transformers/models/granite_speech/modeling_granite_speech.py +21 -23
- transformers/models/granite_speech/processing_granite_speech.py +11 -4
- transformers/models/granitemoe/configuration_granitemoe.py +26 -29
- transformers/models/granitemoe/modeling_granitemoe.py +35 -37
- transformers/models/granitemoe/modular_granitemoe.py +21 -23
- transformers/models/granitemoehybrid/__init__.py +0 -1
- transformers/models/granitemoehybrid/configuration_granitemoehybrid.py +38 -41
- transformers/models/granitemoehybrid/modeling_granitemoehybrid.py +60 -64
- transformers/models/granitemoehybrid/modular_granitemoehybrid.py +18 -20
- transformers/models/granitemoeshared/configuration_granitemoeshared.py +27 -30
- transformers/models/granitemoeshared/modeling_granitemoeshared.py +48 -52
- transformers/models/granitemoeshared/modular_granitemoeshared.py +19 -21
- transformers/models/grounding_dino/configuration_grounding_dino.py +0 -1
- transformers/models/grounding_dino/image_processing_grounding_dino.py +60 -62
- transformers/models/grounding_dino/image_processing_grounding_dino_fast.py +17 -18
- transformers/models/grounding_dino/modeling_grounding_dino.py +94 -96
- transformers/models/grounding_dino/modular_grounding_dino.py +2 -3
- transformers/models/grounding_dino/processing_grounding_dino.py +10 -38
- transformers/models/groupvit/configuration_groupvit.py +0 -1
- transformers/models/groupvit/modeling_groupvit.py +69 -70
- transformers/models/helium/configuration_helium.py +20 -22
- transformers/models/helium/modeling_helium.py +33 -36
- transformers/models/helium/modular_helium.py +3 -7
- transformers/models/herbert/tokenization_herbert.py +4 -6
- transformers/models/hgnet_v2/configuration_hgnet_v2.py +0 -1
- transformers/models/hgnet_v2/modeling_hgnet_v2.py +6 -9
- transformers/models/hgnet_v2/modular_hgnet_v2.py +6 -9
- transformers/models/hiera/configuration_hiera.py +0 -1
- transformers/models/hiera/modeling_hiera.py +60 -62
- transformers/models/hubert/configuration_hubert.py +0 -1
- transformers/models/hubert/modeling_hubert.py +35 -37
- transformers/models/hubert/modular_hubert.py +8 -11
- transformers/models/hunyuan_v1_dense/configuration_hunyuan_v1_dense.py +21 -24
- transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py +30 -33
- transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py +3 -5
- transformers/models/hunyuan_v1_moe/configuration_hunyuan_v1_moe.py +25 -28
- transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py +32 -35
- transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py +5 -7
- transformers/models/ibert/configuration_ibert.py +0 -1
- transformers/models/ibert/modeling_ibert.py +60 -62
- transformers/models/ibert/quant_modules.py +0 -1
- transformers/models/idefics/configuration_idefics.py +0 -1
- transformers/models/idefics/image_processing_idefics.py +13 -15
- transformers/models/idefics/modeling_idefics.py +60 -61
- transformers/models/idefics/perceiver.py +1 -3
- transformers/models/idefics/processing_idefics.py +32 -48
- transformers/models/idefics/vision.py +22 -24
- transformers/models/idefics2/configuration_idefics2.py +0 -1
- transformers/models/idefics2/image_processing_idefics2.py +31 -32
- transformers/models/idefics2/image_processing_idefics2_fast.py +7 -8
- transformers/models/idefics2/modeling_idefics2.py +56 -58
- transformers/models/idefics2/processing_idefics2.py +10 -68
- transformers/models/idefics3/configuration_idefics3.py +0 -1
- transformers/models/idefics3/image_processing_idefics3.py +42 -43
- transformers/models/idefics3/image_processing_idefics3_fast.py +11 -12
- transformers/models/idefics3/modeling_idefics3.py +52 -54
- transformers/models/idefics3/processing_idefics3.py +15 -69
- transformers/models/ijepa/configuration_ijepa.py +0 -1
- transformers/models/ijepa/modeling_ijepa.py +10 -11
- transformers/models/ijepa/modular_ijepa.py +5 -7
- transformers/models/imagegpt/configuration_imagegpt.py +0 -1
- transformers/models/imagegpt/image_processing_imagegpt.py +17 -18
- transformers/models/imagegpt/image_processing_imagegpt_fast.py +8 -9
- transformers/models/imagegpt/modeling_imagegpt.py +57 -58
- transformers/models/informer/configuration_informer.py +6 -9
- transformers/models/informer/modeling_informer.py +84 -86
- transformers/models/informer/modular_informer.py +13 -16
- transformers/models/instructblip/configuration_instructblip.py +0 -1
- transformers/models/instructblip/modeling_instructblip.py +43 -44
- transformers/models/instructblip/processing_instructblip.py +10 -36
- transformers/models/instructblipvideo/configuration_instructblipvideo.py +0 -1
- transformers/models/instructblipvideo/modeling_instructblipvideo.py +55 -55
- transformers/models/instructblipvideo/modular_instructblipvideo.py +34 -36
- transformers/models/instructblipvideo/processing_instructblipvideo.py +14 -33
- transformers/models/instructblipvideo/video_processing_instructblipvideo.py +4 -5
- transformers/models/internvl/configuration_internvl.py +0 -1
- transformers/models/internvl/modeling_internvl.py +41 -43
- transformers/models/internvl/modular_internvl.py +19 -21
- transformers/models/internvl/processing_internvl.py +12 -45
- transformers/models/internvl/video_processing_internvl.py +8 -9
- transformers/models/jais2/configuration_jais2.py +20 -22
- transformers/models/jais2/modeling_jais2.py +32 -34
- transformers/models/jais2/modular_jais2.py +20 -22
- transformers/models/jamba/configuration_jamba.py +0 -1
- transformers/models/jamba/modeling_jamba.py +43 -46
- transformers/models/jamba/modular_jamba.py +37 -38
- transformers/models/janus/configuration_janus.py +0 -1
- transformers/models/janus/image_processing_janus.py +35 -37
- transformers/models/janus/image_processing_janus_fast.py +12 -13
- transformers/models/janus/modeling_janus.py +41 -43
- transformers/models/janus/modular_janus.py +60 -63
- transformers/models/janus/processing_janus.py +17 -43
- transformers/models/jetmoe/configuration_jetmoe.py +20 -23
- transformers/models/jetmoe/modeling_jetmoe.py +39 -42
- transformers/models/jetmoe/modular_jetmoe.py +30 -33
- transformers/models/kosmos2/configuration_kosmos2.py +0 -1
- transformers/models/kosmos2/modeling_kosmos2.py +145 -146
- transformers/models/kosmos2/processing_kosmos2.py +40 -55
- transformers/models/kosmos2_5/__init__.py +0 -1
- transformers/models/kosmos2_5/configuration_kosmos2_5.py +0 -1
- transformers/models/kosmos2_5/image_processing_kosmos2_5.py +10 -12
- transformers/models/kosmos2_5/image_processing_kosmos2_5_fast.py +2 -11
- transformers/models/kosmos2_5/modeling_kosmos2_5.py +108 -109
- transformers/models/kosmos2_5/processing_kosmos2_5.py +8 -29
- transformers/models/kyutai_speech_to_text/configuration_kyutai_speech_to_text.py +23 -25
- transformers/models/kyutai_speech_to_text/feature_extraction_kyutai_speech_to_text.py +12 -14
- transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +59 -66
- transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py +19 -21
- transformers/models/kyutai_speech_to_text/processing_kyutai_speech_to_text.py +2 -8
- transformers/models/lasr/configuration_lasr.py +1 -3
- transformers/models/lasr/feature_extraction_lasr.py +10 -12
- transformers/models/lasr/modeling_lasr.py +18 -21
- transformers/models/lasr/modular_lasr.py +8 -10
- transformers/models/lasr/processing_lasr.py +12 -6
- transformers/models/lasr/tokenization_lasr.py +2 -4
- transformers/models/layoutlm/configuration_layoutlm.py +0 -1
- transformers/models/layoutlm/modeling_layoutlm.py +67 -69
- transformers/models/layoutlmv2/configuration_layoutlmv2.py +0 -1
- transformers/models/layoutlmv2/image_processing_layoutlmv2.py +18 -21
- transformers/models/layoutlmv2/image_processing_layoutlmv2_fast.py +5 -6
- transformers/models/layoutlmv2/modeling_layoutlmv2.py +48 -50
- transformers/models/layoutlmv2/processing_layoutlmv2.py +14 -44
- transformers/models/layoutlmv2/tokenization_layoutlmv2.py +63 -74
- transformers/models/layoutlmv3/configuration_layoutlmv3.py +0 -1
- transformers/models/layoutlmv3/image_processing_layoutlmv3.py +24 -26
- transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py +7 -8
- transformers/models/layoutlmv3/modeling_layoutlmv3.py +49 -51
- transformers/models/layoutlmv3/processing_layoutlmv3.py +14 -46
- transformers/models/layoutlmv3/tokenization_layoutlmv3.py +64 -75
- transformers/models/layoutxlm/configuration_layoutxlm.py +0 -1
- transformers/models/layoutxlm/modular_layoutxlm.py +0 -1
- transformers/models/layoutxlm/processing_layoutxlm.py +14 -44
- transformers/models/layoutxlm/tokenization_layoutxlm.py +65 -76
- transformers/models/led/configuration_led.py +1 -4
- transformers/models/led/modeling_led.py +113 -267
- transformers/models/levit/configuration_levit.py +0 -1
- transformers/models/levit/image_processing_levit.py +19 -21
- transformers/models/levit/image_processing_levit_fast.py +0 -1
- transformers/models/levit/modeling_levit.py +17 -19
- transformers/models/lfm2/configuration_lfm2.py +22 -23
- transformers/models/lfm2/modeling_lfm2.py +42 -44
- transformers/models/lfm2/modular_lfm2.py +29 -29
- transformers/models/lfm2_moe/__init__.py +0 -1
- transformers/models/lfm2_moe/configuration_lfm2_moe.py +1 -2
- transformers/models/lfm2_moe/modeling_lfm2_moe.py +44 -45
- transformers/models/lfm2_moe/modular_lfm2_moe.py +8 -9
- transformers/models/lfm2_vl/configuration_lfm2_vl.py +0 -1
- transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py +34 -5
- transformers/models/lfm2_vl/modeling_lfm2_vl.py +31 -33
- transformers/models/lfm2_vl/modular_lfm2_vl.py +24 -27
- transformers/models/lfm2_vl/processing_lfm2_vl.py +14 -34
- transformers/models/lightglue/image_processing_lightglue.py +16 -15
- transformers/models/lightglue/image_processing_lightglue_fast.py +4 -4
- transformers/models/lightglue/modeling_lightglue.py +28 -30
- transformers/models/lightglue/modular_lightglue.py +28 -28
- transformers/models/lighton_ocr/__init__.py +28 -0
- transformers/models/lighton_ocr/configuration_lighton_ocr.py +128 -0
- transformers/models/lighton_ocr/modeling_lighton_ocr.py +460 -0
- transformers/models/lighton_ocr/modular_lighton_ocr.py +403 -0
- transformers/models/lighton_ocr/processing_lighton_ocr.py +229 -0
- transformers/models/lilt/configuration_lilt.py +0 -1
- transformers/models/lilt/modeling_lilt.py +53 -55
- transformers/models/llama/configuration_llama.py +21 -24
- transformers/models/llama/modeling_llama.py +31 -34
- transformers/models/llama/tokenization_llama.py +2 -4
- transformers/models/llama4/configuration_llama4.py +20 -22
- transformers/models/llama4/image_processing_llama4_fast.py +8 -9
- transformers/models/llama4/modeling_llama4.py +70 -71
- transformers/models/llama4/processing_llama4.py +33 -57
- transformers/models/llava/configuration_llava.py +0 -1
- transformers/models/llava/image_processing_llava.py +25 -28
- transformers/models/llava/image_processing_llava_fast.py +6 -7
- transformers/models/llava/modeling_llava.py +35 -37
- transformers/models/llava/processing_llava.py +18 -51
- transformers/models/llava_next/configuration_llava_next.py +0 -1
- transformers/models/llava_next/image_processing_llava_next.py +43 -45
- transformers/models/llava_next/image_processing_llava_next_fast.py +5 -6
- transformers/models/llava_next/modeling_llava_next.py +42 -44
- transformers/models/llava_next/processing_llava_next.py +18 -47
- transformers/models/llava_next_video/configuration_llava_next_video.py +0 -1
- transformers/models/llava_next_video/modeling_llava_next_video.py +53 -55
- transformers/models/llava_next_video/modular_llava_next_video.py +44 -46
- transformers/models/llava_next_video/processing_llava_next_video.py +21 -63
- transformers/models/llava_next_video/video_processing_llava_next_video.py +0 -1
- transformers/models/llava_onevision/configuration_llava_onevision.py +0 -1
- transformers/models/llava_onevision/image_processing_llava_onevision.py +40 -42
- transformers/models/llava_onevision/image_processing_llava_onevision_fast.py +6 -7
- transformers/models/llava_onevision/modeling_llava_onevision.py +60 -62
- transformers/models/llava_onevision/modular_llava_onevision.py +51 -52
- transformers/models/llava_onevision/processing_llava_onevision.py +21 -53
- transformers/models/llava_onevision/video_processing_llava_onevision.py +0 -1
- transformers/models/longcat_flash/__init__.py +0 -1
- transformers/models/longcat_flash/configuration_longcat_flash.py +32 -35
- transformers/models/longcat_flash/modeling_longcat_flash.py +30 -31
- transformers/models/longcat_flash/modular_longcat_flash.py +17 -19
- transformers/models/longformer/configuration_longformer.py +1 -4
- transformers/models/longformer/modeling_longformer.py +99 -101
- transformers/models/longt5/configuration_longt5.py +0 -1
- transformers/models/longt5/modeling_longt5.py +43 -44
- transformers/models/luke/configuration_luke.py +0 -1
- transformers/models/luke/modeling_luke.py +179 -181
- transformers/models/luke/tokenization_luke.py +99 -105
- transformers/models/lw_detr/__init__.py +27 -0
- transformers/models/lw_detr/configuration_lw_detr.py +374 -0
- transformers/models/lw_detr/modeling_lw_detr.py +1698 -0
- transformers/models/lw_detr/modular_lw_detr.py +1611 -0
- transformers/models/lxmert/configuration_lxmert.py +0 -1
- transformers/models/lxmert/modeling_lxmert.py +63 -74
- transformers/models/m2m_100/configuration_m2m_100.py +0 -1
- transformers/models/m2m_100/modeling_m2m_100.py +69 -71
- transformers/models/m2m_100/tokenization_m2m_100.py +8 -8
- transformers/models/mamba/configuration_mamba.py +0 -1
- transformers/models/mamba/modeling_mamba.py +43 -44
- transformers/models/mamba2/configuration_mamba2.py +0 -1
- transformers/models/mamba2/modeling_mamba2.py +44 -46
- transformers/models/marian/configuration_marian.py +0 -1
- transformers/models/marian/modeling_marian.py +84 -86
- transformers/models/marian/tokenization_marian.py +6 -6
- transformers/models/markuplm/configuration_markuplm.py +0 -1
- transformers/models/markuplm/feature_extraction_markuplm.py +1 -2
- transformers/models/markuplm/modeling_markuplm.py +60 -62
- transformers/models/markuplm/processing_markuplm.py +31 -38
- transformers/models/markuplm/tokenization_markuplm.py +67 -77
- transformers/models/mask2former/configuration_mask2former.py +4 -7
- transformers/models/mask2former/image_processing_mask2former.py +84 -85
- transformers/models/mask2former/image_processing_mask2former_fast.py +29 -29
- transformers/models/mask2former/modeling_mask2former.py +90 -92
- transformers/models/mask2former/modular_mask2former.py +6 -8
- transformers/models/maskformer/configuration_maskformer.py +5 -8
- transformers/models/maskformer/configuration_maskformer_swin.py +0 -1
- transformers/models/maskformer/image_processing_maskformer.py +84 -85
- transformers/models/maskformer/image_processing_maskformer_fast.py +28 -29
- transformers/models/maskformer/modeling_maskformer.py +56 -58
- transformers/models/maskformer/modeling_maskformer_swin.py +18 -20
- transformers/models/mbart/configuration_mbart.py +0 -1
- transformers/models/mbart/modeling_mbart.py +111 -113
- transformers/models/mbart/tokenization_mbart.py +2 -4
- transformers/models/mbart50/tokenization_mbart50.py +3 -5
- transformers/models/megatron_bert/configuration_megatron_bert.py +0 -1
- transformers/models/megatron_bert/modeling_megatron_bert.py +139 -150
- transformers/models/metaclip_2/modeling_metaclip_2.py +46 -46
- transformers/models/metaclip_2/modular_metaclip_2.py +19 -21
- transformers/models/mgp_str/configuration_mgp_str.py +0 -1
- transformers/models/mgp_str/modeling_mgp_str.py +14 -16
- transformers/models/mgp_str/processing_mgp_str.py +3 -20
- transformers/models/mgp_str/tokenization_mgp_str.py +1 -3
- transformers/models/mimi/configuration_mimi.py +38 -40
- transformers/models/mimi/modeling_mimi.py +76 -79
- transformers/models/minimax/__init__.py +0 -1
- transformers/models/minimax/configuration_minimax.py +32 -36
- transformers/models/minimax/modeling_minimax.py +41 -44
- transformers/models/minimax/modular_minimax.py +50 -53
- transformers/models/minimax_m2/__init__.py +28 -0
- transformers/models/minimax_m2/configuration_minimax_m2.py +211 -0
- transformers/models/minimax_m2/modeling_minimax_m2.py +704 -0
- transformers/models/minimax_m2/modular_minimax_m2.py +369 -0
- transformers/models/ministral/configuration_ministral.py +20 -22
- transformers/models/ministral/modeling_ministral.py +31 -33
- transformers/models/ministral/modular_ministral.py +27 -29
- transformers/models/ministral3/configuration_ministral3.py +19 -22
- transformers/models/ministral3/modeling_ministral3.py +31 -33
- transformers/models/ministral3/modular_ministral3.py +4 -5
- transformers/models/mistral/configuration_mistral.py +19 -22
- transformers/models/mistral/modeling_mistral.py +31 -33
- transformers/models/mistral/modular_mistral.py +11 -12
- transformers/models/mistral3/configuration_mistral3.py +0 -1
- transformers/models/mistral3/modeling_mistral3.py +43 -42
- transformers/models/mistral3/modular_mistral3.py +35 -35
- transformers/models/mixtral/configuration_mixtral.py +24 -27
- transformers/models/mixtral/modeling_mixtral.py +35 -38
- transformers/models/mixtral/modular_mixtral.py +26 -29
- transformers/models/mlcd/configuration_mlcd.py +0 -1
- transformers/models/mlcd/modeling_mlcd.py +10 -12
- transformers/models/mlcd/modular_mlcd.py +9 -11
- transformers/models/mllama/configuration_mllama.py +5 -8
- transformers/models/mllama/image_processing_mllama.py +23 -25
- transformers/models/mllama/image_processing_mllama_fast.py +5 -6
- transformers/models/mllama/modeling_mllama.py +81 -84
- transformers/models/mllama/processing_mllama.py +6 -55
- transformers/models/mluke/tokenization_mluke.py +97 -103
- transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py +0 -1
- transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +94 -96
- transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py +0 -1
- transformers/models/mobilebert/configuration_mobilebert.py +0 -1
- transformers/models/mobilebert/modeling_mobilebert.py +75 -85
- transformers/models/mobilebert/tokenization_mobilebert.py +0 -1
- transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +0 -1
- transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py +20 -23
- transformers/models/mobilenet_v1/image_processing_mobilenet_v1_fast.py +0 -1
- transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +13 -16
- transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +0 -1
- transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +48 -51
- transformers/models/mobilenet_v2/image_processing_mobilenet_v2_fast.py +10 -11
- transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +17 -20
- transformers/models/mobilevit/configuration_mobilevit.py +0 -1
- transformers/models/mobilevit/image_processing_mobilevit.py +41 -44
- transformers/models/mobilevit/image_processing_mobilevit_fast.py +8 -9
- transformers/models/mobilevit/modeling_mobilevit.py +17 -19
- transformers/models/mobilevitv2/configuration_mobilevitv2.py +0 -1
- transformers/models/mobilevitv2/modeling_mobilevitv2.py +17 -20
- transformers/models/modernbert/configuration_modernbert.py +34 -34
- transformers/models/modernbert/modeling_modernbert.py +123 -125
- transformers/models/modernbert/modular_modernbert.py +155 -155
- transformers/models/modernbert_decoder/configuration_modernbert_decoder.py +30 -32
- transformers/models/modernbert_decoder/modeling_modernbert_decoder.py +45 -47
- transformers/models/modernbert_decoder/modular_modernbert_decoder.py +69 -70
- transformers/models/moonshine/configuration_moonshine.py +22 -24
- transformers/models/moonshine/modeling_moonshine.py +63 -65
- transformers/models/moonshine/modular_moonshine.py +72 -73
- transformers/models/moshi/configuration_moshi.py +18 -21
- transformers/models/moshi/modeling_moshi.py +130 -133
- transformers/models/mpnet/configuration_mpnet.py +0 -1
- transformers/models/mpnet/modeling_mpnet.py +55 -57
- transformers/models/mpnet/tokenization_mpnet.py +1 -4
- transformers/models/mpt/configuration_mpt.py +1 -9
- transformers/models/mpt/modeling_mpt.py +58 -60
- transformers/models/mra/configuration_mra.py +0 -1
- transformers/models/mra/modeling_mra.py +54 -56
- transformers/models/mt5/configuration_mt5.py +0 -1
- transformers/models/mt5/modeling_mt5.py +75 -77
- transformers/models/musicgen/configuration_musicgen.py +0 -1
- transformers/models/musicgen/modeling_musicgen.py +108 -111
- transformers/models/musicgen/processing_musicgen.py +3 -21
- transformers/models/musicgen_melody/configuration_musicgen_melody.py +0 -1
- transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py +8 -9
- transformers/models/musicgen_melody/modeling_musicgen_melody.py +106 -109
- transformers/models/musicgen_melody/processing_musicgen_melody.py +3 -22
- transformers/models/mvp/configuration_mvp.py +0 -1
- transformers/models/mvp/modeling_mvp.py +115 -119
- transformers/models/myt5/tokenization_myt5.py +8 -10
- transformers/models/nanochat/configuration_nanochat.py +0 -1
- transformers/models/nanochat/modeling_nanochat.py +32 -35
- transformers/models/nanochat/modular_nanochat.py +12 -14
- transformers/models/nemotron/configuration_nemotron.py +20 -23
- transformers/models/nemotron/modeling_nemotron.py +49 -52
- transformers/models/nllb/tokenization_nllb.py +7 -9
- transformers/models/nllb_moe/configuration_nllb_moe.py +0 -1
- transformers/models/nllb_moe/modeling_nllb_moe.py +67 -69
- transformers/models/nougat/image_processing_nougat.py +29 -32
- transformers/models/nougat/image_processing_nougat_fast.py +4 -5
- transformers/models/nougat/processing_nougat.py +37 -39
- transformers/models/nougat/tokenization_nougat.py +5 -7
- transformers/models/nystromformer/configuration_nystromformer.py +0 -1
- transformers/models/nystromformer/modeling_nystromformer.py +61 -63
- transformers/models/olmo/configuration_olmo.py +18 -21
- transformers/models/olmo/modeling_olmo.py +31 -34
- transformers/models/olmo/modular_olmo.py +5 -9
- transformers/models/olmo2/configuration_olmo2.py +18 -21
- transformers/models/olmo2/modeling_olmo2.py +32 -35
- transformers/models/olmo2/modular_olmo2.py +29 -31
- transformers/models/olmo3/__init__.py +0 -1
- transformers/models/olmo3/configuration_olmo3.py +20 -23
- transformers/models/olmo3/modeling_olmo3.py +31 -34
- transformers/models/olmo3/modular_olmo3.py +31 -33
- transformers/models/olmoe/configuration_olmoe.py +24 -26
- transformers/models/olmoe/modeling_olmoe.py +37 -39
- transformers/models/olmoe/modular_olmoe.py +12 -13
- transformers/models/omdet_turbo/configuration_omdet_turbo.py +0 -1
- transformers/models/omdet_turbo/modeling_omdet_turbo.py +38 -40
- transformers/models/omdet_turbo/processing_omdet_turbo.py +19 -67
- transformers/models/oneformer/configuration_oneformer.py +4 -7
- transformers/models/oneformer/image_processing_oneformer.py +83 -84
- transformers/models/oneformer/image_processing_oneformer_fast.py +33 -34
- transformers/models/oneformer/modeling_oneformer.py +123 -124
- transformers/models/oneformer/processing_oneformer.py +28 -43
- transformers/models/openai/configuration_openai.py +0 -1
- transformers/models/openai/modeling_openai.py +50 -51
- transformers/models/openai/tokenization_openai.py +2 -5
- transformers/models/opt/configuration_opt.py +0 -1
- transformers/models/opt/modeling_opt.py +74 -75
- transformers/models/ovis2/__init__.py +0 -1
- transformers/models/ovis2/configuration_ovis2.py +0 -1
- transformers/models/ovis2/image_processing_ovis2.py +22 -24
- transformers/models/ovis2/image_processing_ovis2_fast.py +6 -7
- transformers/models/ovis2/modeling_ovis2.py +43 -45
- transformers/models/ovis2/modular_ovis2.py +30 -32
- transformers/models/ovis2/processing_ovis2.py +12 -40
- transformers/models/owlv2/configuration_owlv2.py +0 -1
- transformers/models/owlv2/image_processing_owlv2.py +20 -21
- transformers/models/owlv2/image_processing_owlv2_fast.py +7 -8
- transformers/models/owlv2/modeling_owlv2.py +82 -87
- transformers/models/owlv2/modular_owlv2.py +6 -7
- transformers/models/owlv2/processing_owlv2.py +20 -49
- transformers/models/owlvit/configuration_owlvit.py +0 -1
- transformers/models/owlvit/image_processing_owlvit.py +21 -22
- transformers/models/owlvit/image_processing_owlvit_fast.py +2 -3
- transformers/models/owlvit/modeling_owlvit.py +81 -86
- transformers/models/owlvit/processing_owlvit.py +20 -48
- transformers/models/paddleocr_vl/__init__.py +0 -1
- transformers/models/paddleocr_vl/configuration_paddleocr_vl.py +19 -19
- transformers/models/paddleocr_vl/image_processing_paddleocr_vl.py +34 -35
- transformers/models/paddleocr_vl/image_processing_paddleocr_vl_fast.py +12 -12
- transformers/models/paddleocr_vl/modeling_paddleocr_vl.py +76 -76
- transformers/models/paddleocr_vl/modular_paddleocr_vl.py +68 -68
- transformers/models/paddleocr_vl/processing_paddleocr_vl.py +1 -3
- transformers/models/paligemma/configuration_paligemma.py +0 -1
- transformers/models/paligemma/modeling_paligemma.py +51 -53
- transformers/models/paligemma/processing_paligemma.py +13 -66
- transformers/models/parakeet/configuration_parakeet.py +1 -4
- transformers/models/parakeet/feature_extraction_parakeet.py +10 -12
- transformers/models/parakeet/modeling_parakeet.py +18 -22
- transformers/models/parakeet/modular_parakeet.py +16 -18
- transformers/models/parakeet/processing_parakeet.py +12 -5
- transformers/models/parakeet/tokenization_parakeet.py +2 -4
- transformers/models/patchtsmixer/configuration_patchtsmixer.py +5 -8
- transformers/models/patchtsmixer/modeling_patchtsmixer.py +60 -62
- transformers/models/patchtst/configuration_patchtst.py +6 -9
- transformers/models/patchtst/modeling_patchtst.py +72 -74
- transformers/models/pe_audio/__init__.py +0 -1
- transformers/models/pe_audio/configuration_pe_audio.py +14 -16
- transformers/models/pe_audio/feature_extraction_pe_audio.py +6 -8
- transformers/models/pe_audio/modeling_pe_audio.py +26 -27
- transformers/models/pe_audio/modular_pe_audio.py +16 -17
- transformers/models/pe_audio/processing_pe_audio.py +0 -1
- transformers/models/pe_audio_video/__init__.py +0 -1
- transformers/models/pe_audio_video/configuration_pe_audio_video.py +15 -17
- transformers/models/pe_audio_video/modeling_pe_audio_video.py +60 -61
- transformers/models/pe_audio_video/modular_pe_audio_video.py +52 -53
- transformers/models/pe_audio_video/processing_pe_audio_video.py +0 -1
- transformers/models/pe_video/__init__.py +0 -1
- transformers/models/pe_video/configuration_pe_video.py +14 -16
- transformers/models/pe_video/modeling_pe_video.py +21 -22
- transformers/models/pe_video/modular_pe_video.py +11 -12
- transformers/models/pe_video/video_processing_pe_video.py +2 -4
- transformers/models/pegasus/configuration_pegasus.py +0 -1
- transformers/models/pegasus/modeling_pegasus.py +63 -65
- transformers/models/pegasus/tokenization_pegasus.py +1 -4
- transformers/models/pegasus_x/configuration_pegasus_x.py +0 -1
- transformers/models/pegasus_x/modeling_pegasus_x.py +50 -52
- transformers/models/perceiver/configuration_perceiver.py +0 -1
- transformers/models/perceiver/image_processing_perceiver.py +22 -25
- transformers/models/perceiver/image_processing_perceiver_fast.py +5 -6
- transformers/models/perceiver/modeling_perceiver.py +135 -136
- transformers/models/perceiver/tokenization_perceiver.py +3 -6
- transformers/models/perception_lm/configuration_perception_lm.py +0 -1
- transformers/models/perception_lm/image_processing_perception_lm_fast.py +8 -9
- transformers/models/perception_lm/modeling_perception_lm.py +38 -40
- transformers/models/perception_lm/modular_perception_lm.py +31 -33
- transformers/models/perception_lm/processing_perception_lm.py +13 -47
- transformers/models/perception_lm/video_processing_perception_lm.py +0 -1
- transformers/models/persimmon/configuration_persimmon.py +18 -21
- transformers/models/persimmon/modeling_persimmon.py +39 -42
- transformers/models/phi/configuration_phi.py +19 -22
- transformers/models/phi/modeling_phi.py +35 -37
- transformers/models/phi/modular_phi.py +23 -23
- transformers/models/phi3/configuration_phi3.py +23 -26
- transformers/models/phi3/modeling_phi3.py +33 -36
- transformers/models/phi3/modular_phi3.py +13 -17
- transformers/models/phi4_multimodal/configuration_phi4_multimodal.py +25 -26
- transformers/models/phi4_multimodal/feature_extraction_phi4_multimodal.py +7 -9
- transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py +7 -7
- transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +54 -56
- transformers/models/phi4_multimodal/modular_phi4_multimodal.py +59 -60
- transformers/models/phi4_multimodal/processing_phi4_multimodal.py +7 -42
- transformers/models/phimoe/configuration_phimoe.py +26 -29
- transformers/models/phimoe/modeling_phimoe.py +35 -38
- transformers/models/phimoe/modular_phimoe.py +0 -1
- transformers/models/phobert/tokenization_phobert.py +4 -6
- transformers/models/pix2struct/configuration_pix2struct.py +0 -1
- transformers/models/pix2struct/image_processing_pix2struct.py +15 -19
- transformers/models/pix2struct/image_processing_pix2struct_fast.py +7 -10
- transformers/models/pix2struct/modeling_pix2struct.py +42 -45
- transformers/models/pix2struct/processing_pix2struct.py +5 -26
- transformers/models/pixio/__init__.py +0 -1
- transformers/models/pixio/configuration_pixio.py +0 -1
- transformers/models/pixio/modeling_pixio.py +7 -9
- transformers/models/pixio/modular_pixio.py +3 -6
- transformers/models/pixtral/configuration_pixtral.py +11 -14
- transformers/models/pixtral/image_processing_pixtral.py +26 -28
- transformers/models/pixtral/image_processing_pixtral_fast.py +5 -6
- transformers/models/pixtral/modeling_pixtral.py +22 -25
- transformers/models/pixtral/processing_pixtral.py +18 -52
- transformers/models/plbart/configuration_plbart.py +0 -1
- transformers/models/plbart/modeling_plbart.py +100 -102
- transformers/models/plbart/modular_plbart.py +30 -32
- transformers/models/plbart/tokenization_plbart.py +4 -5
- transformers/models/poolformer/configuration_poolformer.py +0 -1
- transformers/models/poolformer/image_processing_poolformer.py +21 -24
- transformers/models/poolformer/image_processing_poolformer_fast.py +6 -7
- transformers/models/poolformer/modeling_poolformer.py +10 -12
- transformers/models/pop2piano/configuration_pop2piano.py +0 -1
- transformers/models/pop2piano/feature_extraction_pop2piano.py +6 -9
- transformers/models/pop2piano/modeling_pop2piano.py +22 -23
- transformers/models/pop2piano/processing_pop2piano.py +25 -33
- transformers/models/pop2piano/tokenization_pop2piano.py +15 -23
- transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py +1 -0
- transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py +28 -28
- transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything_fast.py +14 -15
- transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py +9 -10
- transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py +9 -10
- transformers/models/prophetnet/configuration_prophetnet.py +26 -28
- transformers/models/prophetnet/modeling_prophetnet.py +109 -130
- transformers/models/prophetnet/tokenization_prophetnet.py +14 -16
- transformers/models/pvt/configuration_pvt.py +0 -1
- transformers/models/pvt/image_processing_pvt.py +17 -20
- transformers/models/pvt/image_processing_pvt_fast.py +0 -1
- transformers/models/pvt/modeling_pvt.py +19 -21
- transformers/models/pvt_v2/configuration_pvt_v2.py +2 -4
- transformers/models/pvt_v2/modeling_pvt_v2.py +21 -23
- transformers/models/qwen2/configuration_qwen2.py +18 -21
- transformers/models/qwen2/modeling_qwen2.py +31 -33
- transformers/models/qwen2/modular_qwen2.py +11 -12
- transformers/models/qwen2/tokenization_qwen2.py +2 -5
- transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py +20 -23
- transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +135 -128
- transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +116 -109
- transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py +41 -49
- transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +22 -25
- transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +94 -96
- transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py +46 -85
- transformers/models/qwen2_5_vl/processing_qwen2_5_vl.py +7 -43
- transformers/models/qwen2_audio/configuration_qwen2_audio.py +0 -1
- transformers/models/qwen2_audio/modeling_qwen2_audio.py +27 -29
- transformers/models/qwen2_audio/processing_qwen2_audio.py +13 -42
- transformers/models/qwen2_moe/configuration_qwen2_moe.py +28 -31
- transformers/models/qwen2_moe/modeling_qwen2_moe.py +36 -39
- transformers/models/qwen2_moe/modular_qwen2_moe.py +7 -10
- transformers/models/qwen2_vl/configuration_qwen2_vl.py +22 -24
- transformers/models/qwen2_vl/image_processing_qwen2_vl.py +38 -40
- transformers/models/qwen2_vl/image_processing_qwen2_vl_fast.py +8 -9
- transformers/models/qwen2_vl/modeling_qwen2_vl.py +91 -92
- transformers/models/qwen2_vl/processing_qwen2_vl.py +7 -44
- transformers/models/qwen2_vl/video_processing_qwen2_vl.py +35 -13
- transformers/models/qwen3/configuration_qwen3.py +20 -23
- transformers/models/qwen3/modeling_qwen3.py +31 -34
- transformers/models/qwen3/modular_qwen3.py +4 -6
- transformers/models/qwen3_moe/configuration_qwen3_moe.py +25 -28
- transformers/models/qwen3_moe/modeling_qwen3_moe.py +36 -39
- transformers/models/qwen3_moe/modular_qwen3_moe.py +10 -13
- transformers/models/qwen3_next/configuration_qwen3_next.py +31 -34
- transformers/models/qwen3_next/modeling_qwen3_next.py +39 -42
- transformers/models/qwen3_next/modular_qwen3_next.py +33 -34
- transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py +85 -88
- transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +107 -110
- transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +122 -148
- transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py +40 -48
- transformers/models/qwen3_vl/configuration_qwen3_vl.py +16 -19
- transformers/models/qwen3_vl/modeling_qwen3_vl.py +74 -77
- transformers/models/qwen3_vl/modular_qwen3_vl.py +68 -105
- transformers/models/qwen3_vl/processing_qwen3_vl.py +6 -42
- transformers/models/qwen3_vl/video_processing_qwen3_vl.py +10 -12
- transformers/models/qwen3_vl_moe/configuration_qwen3_vl_moe.py +21 -25
- transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py +80 -83
- transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py +33 -36
- transformers/models/rag/configuration_rag.py +0 -1
- transformers/models/rag/modeling_rag.py +116 -118
- transformers/models/rag/retrieval_rag.py +2 -4
- transformers/models/rag/tokenization_rag.py +0 -50
- transformers/models/recurrent_gemma/configuration_recurrent_gemma.py +21 -24
- transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +31 -34
- transformers/models/reformer/configuration_reformer.py +0 -1
- transformers/models/reformer/modeling_reformer.py +67 -68
- transformers/models/reformer/tokenization_reformer.py +3 -6
- transformers/models/regnet/configuration_regnet.py +0 -1
- transformers/models/regnet/modeling_regnet.py +7 -9
- transformers/models/rembert/configuration_rembert.py +0 -1
- transformers/models/rembert/modeling_rembert.py +108 -110
- transformers/models/rembert/tokenization_rembert.py +1 -4
- transformers/models/resnet/configuration_resnet.py +0 -1
- transformers/models/resnet/modeling_resnet.py +8 -10
- transformers/models/roberta/configuration_roberta.py +0 -1
- transformers/models/roberta/modeling_roberta.py +91 -93
- transformers/models/roberta/modular_roberta.py +55 -58
- transformers/models/roberta/tokenization_roberta.py +2 -5
- transformers/models/roberta/tokenization_roberta_old.py +2 -4
- transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +0 -1
- transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +91 -93
- transformers/models/roc_bert/configuration_roc_bert.py +0 -1
- transformers/models/roc_bert/modeling_roc_bert.py +119 -121
- transformers/models/roc_bert/tokenization_roc_bert.py +88 -94
- transformers/models/roformer/configuration_roformer.py +0 -1
- transformers/models/roformer/modeling_roformer.py +79 -81
- transformers/models/roformer/tokenization_roformer.py +3 -6
- transformers/models/roformer/tokenization_utils.py +0 -1
- transformers/models/rt_detr/configuration_rt_detr.py +0 -1
- transformers/models/rt_detr/configuration_rt_detr_resnet.py +0 -1
- transformers/models/rt_detr/image_processing_rt_detr.py +54 -55
- transformers/models/rt_detr/image_processing_rt_detr_fast.py +15 -15
- transformers/models/rt_detr/modeling_rt_detr.py +80 -82
- transformers/models/rt_detr/modeling_rt_detr_resnet.py +2 -4
- transformers/models/rt_detr/modular_rt_detr.py +14 -14
- transformers/models/rt_detr_v2/configuration_rt_detr_v2.py +0 -1
- transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +79 -81
- transformers/models/rt_detr_v2/modular_rt_detr_v2.py +2 -4
- transformers/models/rwkv/configuration_rwkv.py +0 -1
- transformers/models/rwkv/modeling_rwkv.py +29 -31
- transformers/models/sam/configuration_sam.py +0 -1
- transformers/models/sam/image_processing_sam.py +59 -60
- transformers/models/sam/image_processing_sam_fast.py +21 -22
- transformers/models/sam/modeling_sam.py +33 -35
- transformers/models/sam/processing_sam.py +39 -27
- transformers/models/sam2/configuration_sam2.py +0 -1
- transformers/models/sam2/image_processing_sam2_fast.py +14 -15
- transformers/models/sam2/modeling_sam2.py +45 -47
- transformers/models/sam2/modular_sam2.py +43 -44
- transformers/models/sam2/processing_sam2.py +31 -47
- transformers/models/sam2_video/configuration_sam2_video.py +0 -1
- transformers/models/sam2_video/modeling_sam2_video.py +69 -70
- transformers/models/sam2_video/modular_sam2_video.py +60 -79
- transformers/models/sam2_video/processing_sam2_video.py +49 -66
- transformers/models/sam2_video/video_processing_sam2_video.py +1 -4
- transformers/models/sam3/configuration_sam3.py +0 -1
- transformers/models/sam3/image_processing_sam3_fast.py +17 -20
- transformers/models/sam3/modeling_sam3.py +54 -56
- transformers/models/sam3/modular_sam3.py +3 -8
- transformers/models/sam3/processing_sam3.py +29 -48
- transformers/models/sam3_tracker/__init__.py +0 -1
- transformers/models/sam3_tracker/configuration_sam3_tracker.py +0 -1
- transformers/models/sam3_tracker/modeling_sam3_tracker.py +34 -36
- transformers/models/sam3_tracker/modular_sam3_tracker.py +0 -1
- transformers/models/sam3_tracker/processing_sam3_tracker.py +31 -47
- transformers/models/sam3_tracker_video/__init__.py +0 -1
- transformers/models/sam3_tracker_video/configuration_sam3_tracker_video.py +0 -1
- transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +70 -70
- transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py +2 -4
- transformers/models/sam3_tracker_video/processing_sam3_tracker_video.py +50 -66
- transformers/models/sam3_video/configuration_sam3_video.py +0 -1
- transformers/models/sam3_video/modeling_sam3_video.py +29 -31
- transformers/models/sam3_video/processing_sam3_video.py +25 -45
- transformers/models/sam_hq/__init__.py +1 -1
- transformers/models/sam_hq/configuration_sam_hq.py +0 -1
- transformers/models/sam_hq/modeling_sam_hq.py +39 -41
- transformers/models/sam_hq/modular_sam_hq.py +17 -19
- transformers/models/sam_hq/{processing_samhq.py → processing_sam_hq.py} +39 -28
- transformers/models/seamless_m4t/configuration_seamless_m4t.py +0 -1
- transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py +8 -11
- transformers/models/seamless_m4t/modeling_seamless_m4t.py +180 -182
- transformers/models/seamless_m4t/processing_seamless_m4t.py +18 -39
- transformers/models/seamless_m4t/tokenization_seamless_m4t.py +15 -20
- transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py +0 -1
- transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +193 -195
- transformers/models/seed_oss/configuration_seed_oss.py +23 -25
- transformers/models/seed_oss/modeling_seed_oss.py +30 -32
- transformers/models/seed_oss/modular_seed_oss.py +3 -4
- transformers/models/segformer/configuration_segformer.py +0 -10
- transformers/models/segformer/image_processing_segformer.py +39 -42
- transformers/models/segformer/image_processing_segformer_fast.py +7 -8
- transformers/models/segformer/modeling_segformer.py +24 -26
- transformers/models/segformer/modular_segformer.py +5 -6
- transformers/models/seggpt/configuration_seggpt.py +0 -1
- transformers/models/seggpt/image_processing_seggpt.py +38 -41
- transformers/models/seggpt/modeling_seggpt.py +28 -30
- transformers/models/sew/configuration_sew.py +0 -1
- transformers/models/sew/modeling_sew.py +33 -35
- transformers/models/sew/modular_sew.py +10 -12
- transformers/models/sew_d/configuration_sew_d.py +0 -1
- transformers/models/sew_d/modeling_sew_d.py +28 -30
- transformers/models/shieldgemma2/configuration_shieldgemma2.py +0 -1
- transformers/models/shieldgemma2/modeling_shieldgemma2.py +15 -17
- transformers/models/shieldgemma2/processing_shieldgemma2.py +3 -5
- transformers/models/siglip/configuration_siglip.py +0 -1
- transformers/models/siglip/image_processing_siglip.py +17 -20
- transformers/models/siglip/image_processing_siglip_fast.py +0 -1
- transformers/models/siglip/modeling_siglip.py +38 -39
- transformers/models/siglip/processing_siglip.py +2 -14
- transformers/models/siglip/tokenization_siglip.py +6 -7
- transformers/models/siglip2/configuration_siglip2.py +1 -1
- transformers/models/siglip2/image_processing_siglip2.py +15 -16
- transformers/models/siglip2/image_processing_siglip2_fast.py +4 -5
- transformers/models/siglip2/modeling_siglip2.py +54 -54
- transformers/models/siglip2/modular_siglip2.py +23 -25
- transformers/models/siglip2/processing_siglip2.py +2 -14
- transformers/models/smollm3/configuration_smollm3.py +23 -26
- transformers/models/smollm3/modeling_smollm3.py +31 -34
- transformers/models/smollm3/modular_smollm3.py +27 -29
- transformers/models/smolvlm/configuration_smolvlm.py +1 -1
- transformers/models/smolvlm/image_processing_smolvlm.py +42 -43
- transformers/models/smolvlm/image_processing_smolvlm_fast.py +12 -12
- transformers/models/smolvlm/modeling_smolvlm.py +51 -52
- transformers/models/smolvlm/modular_smolvlm.py +15 -17
- transformers/models/smolvlm/processing_smolvlm.py +15 -76
- transformers/models/smolvlm/video_processing_smolvlm.py +7 -8
- transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +0 -1
- transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +20 -23
- transformers/models/speech_to_text/configuration_speech_to_text.py +0 -1
- transformers/models/speech_to_text/feature_extraction_speech_to_text.py +10 -13
- transformers/models/speech_to_text/modeling_speech_to_text.py +52 -54
- transformers/models/speech_to_text/processing_speech_to_text.py +4 -30
- transformers/models/speech_to_text/tokenization_speech_to_text.py +5 -6
- transformers/models/speecht5/configuration_speecht5.py +0 -1
- transformers/models/speecht5/feature_extraction_speecht5.py +16 -37
- transformers/models/speecht5/modeling_speecht5.py +172 -174
- transformers/models/speecht5/number_normalizer.py +0 -1
- transformers/models/speecht5/processing_speecht5.py +3 -37
- transformers/models/speecht5/tokenization_speecht5.py +4 -5
- transformers/models/splinter/configuration_splinter.py +0 -1
- transformers/models/splinter/modeling_splinter.py +54 -56
- transformers/models/splinter/tokenization_splinter.py +2 -4
- transformers/models/squeezebert/configuration_squeezebert.py +0 -1
- transformers/models/squeezebert/modeling_squeezebert.py +60 -62
- transformers/models/squeezebert/tokenization_squeezebert.py +0 -1
- transformers/models/stablelm/configuration_stablelm.py +20 -23
- transformers/models/stablelm/modeling_stablelm.py +39 -42
- transformers/models/starcoder2/configuration_starcoder2.py +19 -22
- transformers/models/starcoder2/modeling_starcoder2.py +33 -36
- transformers/models/starcoder2/modular_starcoder2.py +13 -15
- transformers/models/superglue/configuration_superglue.py +3 -3
- transformers/models/superglue/image_processing_superglue.py +15 -15
- transformers/models/superglue/image_processing_superglue_fast.py +4 -5
- transformers/models/superglue/modeling_superglue.py +32 -33
- transformers/models/superpoint/image_processing_superpoint.py +15 -15
- transformers/models/superpoint/image_processing_superpoint_fast.py +4 -5
- transformers/models/superpoint/modeling_superpoint.py +13 -14
- transformers/models/swiftformer/configuration_swiftformer.py +0 -1
- transformers/models/swiftformer/modeling_swiftformer.py +12 -14
- transformers/models/swin/configuration_swin.py +0 -1
- transformers/models/swin/modeling_swin.py +58 -70
- transformers/models/swin2sr/configuration_swin2sr.py +0 -1
- transformers/models/swin2sr/image_processing_swin2sr.py +10 -13
- transformers/models/swin2sr/image_processing_swin2sr_fast.py +2 -5
- transformers/models/swin2sr/modeling_swin2sr.py +26 -28
- transformers/models/swinv2/configuration_swinv2.py +0 -1
- transformers/models/swinv2/modeling_swinv2.py +55 -67
- transformers/models/switch_transformers/configuration_switch_transformers.py +0 -1
- transformers/models/switch_transformers/modeling_switch_transformers.py +32 -33
- transformers/models/switch_transformers/modular_switch_transformers.py +29 -30
- transformers/models/t5/configuration_t5.py +0 -1
- transformers/models/t5/modeling_t5.py +75 -77
- transformers/models/t5/tokenization_t5.py +1 -3
- transformers/models/t5gemma/configuration_t5gemma.py +33 -34
- transformers/models/t5gemma/modeling_t5gemma.py +96 -99
- transformers/models/t5gemma/modular_t5gemma.py +117 -118
- transformers/models/t5gemma2/configuration_t5gemma2.py +53 -54
- transformers/models/t5gemma2/modeling_t5gemma2.py +96 -99
- transformers/models/t5gemma2/modular_t5gemma2.py +134 -135
- transformers/models/table_transformer/configuration_table_transformer.py +0 -1
- transformers/models/table_transformer/modeling_table_transformer.py +46 -48
- transformers/models/tapas/configuration_tapas.py +0 -1
- transformers/models/tapas/modeling_tapas.py +64 -66
- transformers/models/tapas/tokenization_tapas.py +115 -153
- transformers/models/textnet/configuration_textnet.py +0 -1
- transformers/models/textnet/image_processing_textnet.py +22 -25
- transformers/models/textnet/image_processing_textnet_fast.py +5 -6
- transformers/models/textnet/modeling_textnet.py +13 -14
- transformers/models/time_series_transformer/configuration_time_series_transformer.py +5 -8
- transformers/models/time_series_transformer/modeling_time_series_transformer.py +79 -81
- transformers/models/timesfm/configuration_timesfm.py +0 -1
- transformers/models/timesfm/modeling_timesfm.py +17 -19
- transformers/models/timesfm/modular_timesfm.py +16 -18
- transformers/models/timesformer/configuration_timesformer.py +0 -1
- transformers/models/timesformer/modeling_timesformer.py +13 -16
- transformers/models/timm_backbone/configuration_timm_backbone.py +0 -1
- transformers/models/timm_backbone/modeling_timm_backbone.py +4 -6
- transformers/models/timm_wrapper/configuration_timm_wrapper.py +2 -3
- transformers/models/timm_wrapper/image_processing_timm_wrapper.py +4 -5
- transformers/models/timm_wrapper/modeling_timm_wrapper.py +13 -15
- transformers/models/trocr/configuration_trocr.py +0 -1
- transformers/models/trocr/modeling_trocr.py +38 -40
- transformers/models/trocr/processing_trocr.py +5 -25
- transformers/models/tvp/configuration_tvp.py +0 -1
- transformers/models/tvp/image_processing_tvp.py +50 -52
- transformers/models/tvp/image_processing_tvp_fast.py +9 -10
- transformers/models/tvp/modeling_tvp.py +25 -27
- transformers/models/tvp/processing_tvp.py +2 -14
- transformers/models/udop/configuration_udop.py +0 -1
- transformers/models/udop/modeling_udop.py +63 -66
- transformers/models/udop/processing_udop.py +7 -26
- transformers/models/udop/tokenization_udop.py +80 -93
- transformers/models/umt5/configuration_umt5.py +0 -1
- transformers/models/umt5/modeling_umt5.py +80 -81
- transformers/models/unispeech/configuration_unispeech.py +0 -1
- transformers/models/unispeech/modeling_unispeech.py +47 -49
- transformers/models/unispeech/modular_unispeech.py +20 -22
- transformers/models/unispeech_sat/configuration_unispeech_sat.py +0 -1
- transformers/models/unispeech_sat/modeling_unispeech_sat.py +63 -65
- transformers/models/unispeech_sat/modular_unispeech_sat.py +21 -23
- transformers/models/univnet/feature_extraction_univnet.py +14 -14
- transformers/models/univnet/modeling_univnet.py +7 -8
- transformers/models/upernet/configuration_upernet.py +0 -1
- transformers/models/upernet/modeling_upernet.py +10 -13
- transformers/models/vaultgemma/__init__.py +0 -1
- transformers/models/vaultgemma/configuration_vaultgemma.py +24 -26
- transformers/models/vaultgemma/modeling_vaultgemma.py +34 -36
- transformers/models/vaultgemma/modular_vaultgemma.py +29 -31
- transformers/models/video_llama_3/image_processing_video_llama_3.py +40 -40
- transformers/models/video_llama_3/image_processing_video_llama_3_fast.py +8 -8
- transformers/models/video_llama_3/modeling_video_llama_3.py +66 -66
- transformers/models/video_llama_3/modular_video_llama_3.py +101 -112
- transformers/models/video_llama_3/processing_video_llama_3.py +5 -39
- transformers/models/video_llama_3/video_processing_video_llama_3.py +18 -18
- transformers/models/video_llava/configuration_video_llava.py +0 -1
- transformers/models/video_llava/image_processing_video_llava.py +35 -38
- transformers/models/video_llava/modeling_video_llava.py +52 -54
- transformers/models/video_llava/processing_video_llava.py +38 -78
- transformers/models/video_llava/video_processing_video_llava.py +0 -1
- transformers/models/videomae/configuration_videomae.py +0 -1
- transformers/models/videomae/image_processing_videomae.py +31 -34
- transformers/models/videomae/modeling_videomae.py +13 -15
- transformers/models/videomae/video_processing_videomae.py +0 -1
- transformers/models/vilt/configuration_vilt.py +0 -1
- transformers/models/vilt/image_processing_vilt.py +29 -30
- transformers/models/vilt/image_processing_vilt_fast.py +9 -10
- transformers/models/vilt/modeling_vilt.py +76 -78
- transformers/models/vilt/processing_vilt.py +2 -14
- transformers/models/vipllava/configuration_vipllava.py +0 -1
- transformers/models/vipllava/modeling_vipllava.py +38 -39
- transformers/models/vipllava/modular_vipllava.py +30 -32
- transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py +0 -1
- transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +18 -21
- transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py +0 -1
- transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +18 -21
- transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py +2 -16
- transformers/models/visual_bert/configuration_visual_bert.py +0 -1
- transformers/models/visual_bert/modeling_visual_bert.py +90 -92
- transformers/models/vit/configuration_vit.py +0 -1
- transformers/models/vit/image_processing_vit.py +19 -22
- transformers/models/vit/image_processing_vit_fast.py +0 -1
- transformers/models/vit/modeling_vit.py +13 -15
- transformers/models/vit_mae/configuration_vit_mae.py +0 -1
- transformers/models/vit_mae/modeling_vit_mae.py +21 -23
- transformers/models/vit_msn/configuration_vit_msn.py +0 -1
- transformers/models/vit_msn/modeling_vit_msn.py +10 -12
- transformers/models/vitdet/configuration_vitdet.py +0 -1
- transformers/models/vitdet/modeling_vitdet.py +12 -14
- transformers/models/vitmatte/configuration_vitmatte.py +1 -4
- transformers/models/vitmatte/image_processing_vitmatte.py +15 -18
- transformers/models/vitmatte/image_processing_vitmatte_fast.py +14 -15
- transformers/models/vitmatte/modeling_vitmatte.py +9 -11
- transformers/models/vitpose/configuration_vitpose.py +3 -6
- transformers/models/vitpose/image_processing_vitpose.py +24 -25
- transformers/models/vitpose/image_processing_vitpose_fast.py +9 -10
- transformers/models/vitpose/modeling_vitpose.py +10 -12
- transformers/models/vitpose_backbone/configuration_vitpose_backbone.py +0 -1
- transformers/models/vitpose_backbone/modeling_vitpose_backbone.py +8 -10
- transformers/models/vits/configuration_vits.py +0 -1
- transformers/models/vits/modeling_vits.py +34 -35
- transformers/models/vits/tokenization_vits.py +3 -4
- transformers/models/vivit/configuration_vivit.py +0 -1
- transformers/models/vivit/image_processing_vivit.py +36 -39
- transformers/models/vivit/modeling_vivit.py +5 -7
- transformers/models/vjepa2/__init__.py +0 -1
- transformers/models/vjepa2/configuration_vjepa2.py +0 -1
- transformers/models/vjepa2/modeling_vjepa2.py +30 -32
- transformers/models/vjepa2/video_processing_vjepa2.py +0 -1
- transformers/models/voxtral/__init__.py +0 -1
- transformers/models/voxtral/configuration_voxtral.py +0 -1
- transformers/models/voxtral/modeling_voxtral.py +17 -25
- transformers/models/voxtral/modular_voxtral.py +10 -19
- transformers/models/voxtral/processing_voxtral.py +25 -48
- transformers/models/wav2vec2/configuration_wav2vec2.py +0 -1
- transformers/models/wav2vec2/feature_extraction_wav2vec2.py +7 -10
- transformers/models/wav2vec2/modeling_wav2vec2.py +67 -122
- transformers/models/wav2vec2/processing_wav2vec2.py +6 -35
- transformers/models/wav2vec2/tokenization_wav2vec2.py +20 -332
- transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py +0 -1
- transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +49 -52
- transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py +45 -48
- transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py +6 -35
- transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +0 -1
- transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +62 -65
- transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py +15 -18
- transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py +16 -17
- transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py +36 -55
- transformers/models/wavlm/configuration_wavlm.py +0 -1
- transformers/models/wavlm/modeling_wavlm.py +45 -48
- transformers/models/wavlm/modular_wavlm.py +4 -5
- transformers/models/whisper/configuration_whisper.py +0 -1
- transformers/models/whisper/english_normalizer.py +3 -4
- transformers/models/whisper/feature_extraction_whisper.py +9 -24
- transformers/models/whisper/generation_whisper.py +26 -48
- transformers/models/whisper/modeling_whisper.py +68 -70
- transformers/models/whisper/processing_whisper.py +3 -20
- transformers/models/whisper/tokenization_whisper.py +9 -30
- transformers/models/x_clip/configuration_x_clip.py +0 -1
- transformers/models/x_clip/modeling_x_clip.py +68 -69
- transformers/models/x_clip/processing_x_clip.py +2 -14
- transformers/models/xcodec/configuration_xcodec.py +4 -6
- transformers/models/xcodec/modeling_xcodec.py +15 -17
- transformers/models/xglm/configuration_xglm.py +0 -1
- transformers/models/xglm/modeling_xglm.py +49 -55
- transformers/models/xglm/tokenization_xglm.py +1 -4
- transformers/models/xlm/configuration_xlm.py +0 -1
- transformers/models/xlm/modeling_xlm.py +126 -130
- transformers/models/xlm/tokenization_xlm.py +3 -5
- transformers/models/xlm_roberta/configuration_xlm_roberta.py +0 -1
- transformers/models/xlm_roberta/modeling_xlm_roberta.py +90 -92
- transformers/models/xlm_roberta/modular_xlm_roberta.py +50 -53
- transformers/models/xlm_roberta/tokenization_xlm_roberta.py +1 -4
- transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +0 -1
- transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +91 -93
- transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py +67 -70
- transformers/models/xlnet/configuration_xlnet.py +0 -11
- transformers/models/xlnet/modeling_xlnet.py +149 -162
- transformers/models/xlnet/tokenization_xlnet.py +1 -4
- transformers/models/xlstm/configuration_xlstm.py +3 -5
- transformers/models/xlstm/modeling_xlstm.py +62 -65
- transformers/models/xmod/configuration_xmod.py +0 -1
- transformers/models/xmod/modeling_xmod.py +98 -100
- transformers/models/yolos/configuration_yolos.py +0 -1
- transformers/models/yolos/image_processing_yolos.py +60 -62
- transformers/models/yolos/image_processing_yolos_fast.py +18 -18
- transformers/models/yolos/modeling_yolos.py +12 -14
- transformers/models/yolos/modular_yolos.py +2 -4
- transformers/models/yoso/configuration_yoso.py +0 -1
- transformers/models/yoso/modeling_yoso.py +60 -62
- transformers/models/zamba/configuration_zamba.py +0 -1
- transformers/models/zamba/modeling_zamba.py +68 -69
- transformers/models/zamba2/configuration_zamba2.py +36 -37
- transformers/models/zamba2/modeling_zamba2.py +84 -87
- transformers/models/zamba2/modular_zamba2.py +43 -45
- transformers/models/zoedepth/configuration_zoedepth.py +0 -1
- transformers/models/zoedepth/image_processing_zoedepth.py +28 -29
- transformers/models/zoedepth/image_processing_zoedepth_fast.py +11 -12
- transformers/models/zoedepth/modeling_zoedepth.py +14 -16
- transformers/pipelines/__init__.py +50 -49
- transformers/pipelines/any_to_any.py +14 -22
- transformers/pipelines/audio_utils.py +1 -2
- transformers/pipelines/base.py +12 -16
- transformers/pipelines/deprecated/__init__.py +0 -1
- transformers/pipelines/image_text_to_text.py +0 -1
- transformers/pipelines/image_to_text.py +4 -44
- transformers/pipelines/question_answering.py +4 -43
- transformers/pipelines/text_classification.py +1 -14
- transformers/pipelines/token_classification.py +1 -22
- transformers/pipelines/video_classification.py +1 -9
- transformers/pipelines/zero_shot_audio_classification.py +0 -1
- transformers/pipelines/zero_shot_classification.py +0 -6
- transformers/pipelines/zero_shot_image_classification.py +0 -7
- transformers/processing_utils.py +95 -95
- transformers/quantizers/base.py +10 -0
- transformers/quantizers/quantizer_quark.py +0 -1
- transformers/quantizers/quantizer_torchao.py +3 -3
- transformers/testing_utils.py +3 -37
- transformers/tokenization_mistral_common.py +554 -903
- transformers/tokenization_utils_base.py +109 -122
- transformers/tokenization_utils_sentencepiece.py +5 -6
- transformers/tokenization_utils_tokenizers.py +5 -5
- transformers/trainer.py +6 -9
- transformers/trainer_jit_checkpoint.py +1 -2
- transformers/training_args.py +3 -3
- transformers/utils/attention_visualizer.py +1 -1
- transformers/utils/auto_docstring.py +564 -12
- transformers/utils/doc.py +1 -1
- transformers/utils/dummy_pt_objects.py +0 -42
- transformers/utils/generic.py +1 -1
- transformers/utils/loading_report.py +3 -3
- transformers/utils/quantization_config.py +8 -10
- transformers/video_processing_utils.py +19 -20
- transformers/video_utils.py +18 -22
- {transformers-5.0.0rc2.dist-info → transformers-5.0.0rc3.dist-info}/METADATA +19 -19
- transformers-5.0.0rc3.dist-info/RECORD +2067 -0
- transformers-5.0.0rc2.dist-info/RECORD +0 -2042
- {transformers-5.0.0rc2.dist-info → transformers-5.0.0rc3.dist-info}/WHEEL +0 -0
- {transformers-5.0.0rc2.dist-info → transformers-5.0.0rc3.dist-info}/entry_points.txt +0 -0
- {transformers-5.0.0rc2.dist-info → transformers-5.0.0rc3.dist-info}/licenses/LICENSE +0 -0
- {transformers-5.0.0rc2.dist-info → transformers-5.0.0rc3.dist-info}/top_level.txt +0 -0

@@ -4,7 +4,6 @@
 # the file from the modular. If any change should be done, please apply the change to the
 # modular_granitemoeshared.py file directly. One of our CI enforces this.
 # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
-# coding=utf-8
 # Copyright 2024 IBM and the HuggingFace Inc. team. All rights reserved.
 #
 #
@@ -20,7 +19,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from collections.abc import Callable
-from typing import Optional, TypedDict
+from typing import Optional, TypedDict
 
 import torch
 from torch import nn
@@ -47,17 +46,16 @@ class GraniteFlashAttentionKwargs(TypedDict, total=False):
     Keyword arguments for advanced Flash Attention, causal-conv1d, and mamba_ssm kernel usage.
     Use cases include padding-free training and fewer `torch.compile` graph breaks.
 
-
-
-
-
-
-
-
-
-
-
-        Index of each packed sequence.
+    cu_seq_lens_q (`torch.LongTensor`):
+        Gets cumulative sequence length for query state.
+    cu_seq_lens_k (`torch.LongTensor`):
+        Gets cumulative sequence length for key state.
+    max_length_q (`int`):
+        Maximum sequence length for query state.
+    max_length_k (`int`):
+        Maximum sequence length for key state.
+    seq_idx (`torch.IntTensor):
+        Index of each packed sequence.
     """
 
     cu_seq_lens_q: torch.LongTensor
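The rewritten docstring above now names a type for every `GraniteFlashAttentionKwargs` field. As a quick orientation, here is a minimal sketch of assembling such padding-free kwargs for two packed sequences; the cumulative-boundary layout is an assumption inferred from the field descriptions, not code taken from the wheel:

```python
import torch
from typing import TypedDict


class GraniteFlashAttentionKwargs(TypedDict, total=False):
    # Mirrors the fields documented in the hunk above.
    cu_seq_lens_q: torch.LongTensor
    cu_seq_lens_k: torch.LongTensor
    max_length_q: int
    max_length_k: int
    seq_idx: torch.IntTensor


# Two packed sequences of lengths 3 and 5 -> cumulative boundaries [0, 3, 8].
seq_lens = torch.tensor([3, 5])
cu = torch.cat([torch.zeros(1, dtype=torch.long), seq_lens.cumsum(0)])
kwargs: GraniteFlashAttentionKwargs = {
    "cu_seq_lens_q": cu,
    "cu_seq_lens_k": cu,
    "max_length_q": int(seq_lens.max()),
    "max_length_k": int(seq_lens.max()),
    "seq_idx": torch.repeat_interleave(torch.arange(2, dtype=torch.int32), seq_lens),
}
```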
@@ -263,7 +261,7 @@ def rotate_half(x):
 
 
 @use_kernel_func_from_hub("rotary_pos_emb")
-def apply_rotary_pos_emb(q, k, cos, sin,
+def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
     """Applies Rotary Position Embedding to the query and key tensors.
 
     Args:
@@ -271,8 +269,6 @@ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
         k (`torch.Tensor`): The key tensor.
         cos (`torch.Tensor`): The cosine part of the rotary embedding.
         sin (`torch.Tensor`): The sine part of the rotary embedding.
-        position_ids (`torch.Tensor`, *optional*):
-            Deprecated and unused.
         unsqueeze_dim (`int`, *optional*, defaults to 1):
             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
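For context on the two hunks above: the signature drops the deprecated `position_ids` parameter and keeps only `unsqueeze_dim`. A self-contained sketch of the rotation this signature implies, using the conventional `rotate_half` formulation (an illustration, not the wheel's exact body):

```python
import torch


def rotate_half(x):
    # Rotate the second half of the last dimension into (negated) first-half position.
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    # unsqueeze_dim=1 broadcasts cos/sin over the head axis of [batch, heads, seq, dim].
    cos, sin = cos.unsqueeze(unsqueeze_dim), sin.unsqueeze(unsqueeze_dim)
    return q * cos + rotate_half(q) * sin, k * cos + rotate_half(k) * sin


q = k = torch.randn(1, 4, 8, 16)  # [batch, heads, seq, head_dim]
cos, sin = torch.randn(1, 8, 16), torch.randn(1, 8, 16)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
```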
@@ -307,7 +303,7 @@ def eager_attention_forward(
     query: torch.Tensor,
     key: torch.Tensor,
     value: torch.Tensor,
-    attention_mask:
+    attention_mask: torch.Tensor | None,
     scaling: float,
     dropout: float = 0.0,
     **kwargs: Unpack[TransformersKwargs],
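This hunk shows the pattern that recurs through most of the release: parameter annotations whose right-hand side this diff view truncates away (presumably `typing.Optional`/`typing.Union` spellings in rc2) are restated as PEP 604 unions in rc3. Schematically:

```python
from typing import Optional

import torch


# rc2-era spelling (still valid Python, just no longer used in these signatures):
def old_style(attention_mask: Optional[torch.Tensor] = None) -> Optional[torch.Tensor]:
    return attention_mask


# rc3 spelling, as in the hunks above:
def new_style(attention_mask: torch.Tensor | None = None) -> torch.Tensor | None:
    return attention_mask
```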
@@ -358,10 +354,10 @@ class GraniteMoeSharedAttention(nn.Module):
     def forward(
         self,
         hidden_states: torch.Tensor,
-        position_embeddings:
-        attention_mask:
-        past_key_values:
-        cache_position:
+        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
+        attention_mask: torch.Tensor | None = None,
+        past_key_values: Cache | None = None,
+        cache_position: torch.LongTensor | None = None,
         **kwargs: Unpack[TransformersKwargs],
     ) -> tuple[torch.Tensor, torch.Tensor]:
         input_shape = hidden_states.shape[:-1]
@@ -413,15 +409,15 @@ class GraniteMoeSharedDecoderLayer(GradientCheckpointingLayer):
     def forward(
         self,
         hidden_states: torch.Tensor,
-        attention_mask:
-        position_ids:
-        past_key_values:
-        output_attentions:
-        use_cache:
-        cache_position:
-        position_embeddings:
+        attention_mask: torch.Tensor | None = None,
+        position_ids: torch.LongTensor | None = None,
+        past_key_values: Cache | None = None,
+        output_attentions: bool | None = False,
+        use_cache: bool | None = False,
+        cache_position: torch.LongTensor | None = None,
+        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
         **kwargs: Unpack[GraniteFlashAttentionKwargs],
-    ) -> tuple[torch.FloatTensor,
+    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
         residual = hidden_states
         hidden_states = self.input_layernorm(hidden_states)
 
@@ -497,9 +493,9 @@ class GraniteMoeSharedRotaryEmbedding(nn.Module):
 
     @staticmethod
     def compute_default_rope_parameters(
-        config:
+        config: GraniteMoeSharedConfig | None = None,
         device: Optional["torch.device"] = None,
-        seq_len:
+        seq_len: int | None = None,
     ) -> tuple["torch.Tensor", float]:
         """
         Computes the inverse frequencies according to the original RoPE implementation
@@ -564,13 +560,13 @@ class GraniteMoeSharedModel(GraniteMoeSharedPreTrainedModel):
     @auto_docstring
     def forward(
         self,
-        input_ids:
-        attention_mask:
-        position_ids:
-        past_key_values:
-        inputs_embeds:
-        use_cache:
-        cache_position:
+        input_ids: torch.LongTensor | None = None,
+        attention_mask: torch.Tensor | None = None,
+        position_ids: torch.LongTensor | None = None,
+        past_key_values: Cache | None = None,
+        inputs_embeds: torch.FloatTensor | None = None,
+        use_cache: bool | None = None,
+        cache_position: torch.LongTensor | None = None,
         **kwargs: Unpack[TransformersKwargs],
     ) -> MoeModelOutputWithPast:
         if (input_ids is None) ^ (inputs_embeds is not None):
@@ -625,11 +621,11 @@ class GraniteMoeSharedModel(GraniteMoeSharedPreTrainedModel):
 
 
 def load_balancing_loss_func(
-    gate_logits:
-    num_experts:
+    gate_logits: torch.Tensor | tuple[torch.Tensor] | None,
+    num_experts: int | None = None,
     top_k=2,
-    attention_mask:
-) ->
+    attention_mask: torch.Tensor | None = None,
+) -> torch.Tensor | int:
     r"""
     Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
 
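The new annotations document that `gate_logits` may be a per-layer tuple and that the function can return a plain `int` (`0`) when no logits are supplied, hence `torch.Tensor | int`. A rough sketch of the Switch-Transformers auxiliary loss the docstring names, simplified to ignore `attention_mask` (which the real function uses to discount padding tokens):

```python
import torch
import torch.nn.functional as F


def load_balancing_loss_sketch(gate_logits, num_experts, top_k=2):
    if gate_logits is None:
        return 0  # matches the `torch.Tensor | int` return annotation
    # gate_logits: one [batch * seq_len, num_experts] router-logit tensor per layer.
    routing = torch.softmax(torch.cat(gate_logits, dim=0), dim=-1)
    _, selected = torch.topk(routing, top_k, dim=-1)
    expert_mask = F.one_hot(selected, num_experts)        # [tokens, top_k, experts]
    tokens_per_expert = expert_mask.float().mean(dim=0)   # fraction of tokens per expert
    router_prob_per_expert = routing.mean(dim=0)          # mean router probability
    return torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0)) * num_experts


loss = load_balancing_loss_sketch((torch.randn(16, 8), torch.randn(16, 8)), num_experts=8)
```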
@@ -729,17 +725,17 @@ class GraniteMoeSharedForCausalLM(GraniteMoeSharedPreTrainedModel, GenerationMix
     @can_return_tuple
     def forward(
         self,
-        input_ids:
-        attention_mask:
-        position_ids:
-        past_key_values:
-        inputs_embeds:
-        labels:
-        output_router_logits:
-        cache_position:
-        logits_to_keep:
+        input_ids: torch.LongTensor | None = None,
+        attention_mask: torch.Tensor | None = None,
+        position_ids: torch.LongTensor | None = None,
+        past_key_values: Cache | None = None,
+        inputs_embeds: torch.FloatTensor | None = None,
+        labels: torch.LongTensor | None = None,
+        output_router_logits: bool | None = None,
+        cache_position: torch.LongTensor | None = None,
+        logits_to_keep: int | torch.Tensor = 0,
         **kwargs,
-    ) ->
+    ) -> tuple | MoeCausalLMOutputWithPast:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
@@ -1,4 +1,3 @@
-# coding=utf-8
 # Copyright 2024 IBM and the HuggingFace Inc. team. All rights reserved.
 #
 #
@@ -13,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import
+from typing import TypedDict
 
 import torch
 from torch import nn
@@ -39,17 +38,16 @@ class GraniteFlashAttentionKwargs(TypedDict, total=False):
     Keyword arguments for advanced Flash Attention, causal-conv1d, and mamba_ssm kernel usage.
     Use cases include padding-free training and fewer `torch.compile` graph breaks.
 
-
-
-
-
-
-
-
-
-
-
-        Index of each packed sequence.
+    cu_seq_lens_q (`torch.LongTensor`):
+        Gets cumulative sequence length for query state.
+    cu_seq_lens_k (`torch.LongTensor`):
+        Gets cumulative sequence length for key state.
+    max_length_q (`int`):
+        Maximum sequence length for query state.
+    max_length_k (`int`):
+        Maximum sequence length for key state.
+    seq_idx (`torch.IntTensor):
+        Index of each packed sequence.
     """
 
     cu_seq_lens_q: torch.LongTensor
@@ -93,15 +91,15 @@ class GraniteMoeSharedDecoderLayer(GraniteMoeDecoderLayer):
     def forward(
         self,
         hidden_states: torch.Tensor,
-        attention_mask:
-        position_ids:
-        past_key_values:
-        output_attentions:
-        use_cache:
-        cache_position:
-        position_embeddings:
+        attention_mask: torch.Tensor | None = None,
+        position_ids: torch.LongTensor | None = None,
+        past_key_values: Cache | None = None,
+        output_attentions: bool | None = False,
+        use_cache: bool | None = False,
+        cache_position: torch.LongTensor | None = None,
+        position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
         **kwargs: Unpack[GraniteFlashAttentionKwargs],
-    ) -> tuple[torch.FloatTensor,
+    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
         residual = hidden_states
         hidden_states = self.input_layernorm(hidden_states)
 
@@ -1,4 +1,3 @@
-# coding=utf-8
 # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,7 +17,7 @@ import io
 import pathlib
 from collections import defaultdict
 from collections.abc import Iterable
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any
 
 import numpy as np
 
@@ -74,7 +73,6 @@ if is_vision_available():
 
 if is_scipy_available():
     import scipy.special
-    import scipy.stats
 
 if TYPE_CHECKING:
     from .modeling_grounding_dino import GroundingDinoObjectDetectionOutput
@@ -82,7 +80,7 @@ if TYPE_CHECKING:
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
 
-AnnotationType = dict[str,
+AnnotationType = dict[str, int | str | list[dict]]
 
 
 class AnnotationFormat(ExplicitEnum):
@@ -109,18 +107,18 @@ class GroundingDinoImageProcessorKwargs(ImagesKwargs, total=False):
         Path to the directory containing the segmentation masks.
     """
 
-    format:
+    format: str | AnnotationFormat
     do_convert_annotations: bool
     return_segmentation_masks: bool
-    annotations:
-    masks_path:
+    annotations: AnnotationType | list[AnnotationType] | None
+    masks_path: str | pathlib.Path | None
 
 
 def get_resize_output_image_size(
     input_image: np.ndarray,
-    size:
-    max_size:
-    input_data_format:
+    size: int | tuple[int, int] | list[int],
+    max_size: int | None = None,
+    input_data_format: str | ChannelDimension | None = None,
 ) -> tuple[int, int]:
     """
     Computes the output image size given the input image size and the desired output size. If the desired output size
@@ -149,7 +147,7 @@ def get_image_size_for_max_height_width(
     input_image: np.ndarray,
     max_height: int,
     max_width: int,
-    input_data_format:
+    input_data_format: str | ChannelDimension | None = None,
 ) -> tuple[int, int]:
     """
     Computes the output image size given the input image and the maximum allowed height and width. Keep aspect ratio.
@@ -181,7 +179,7 @@ def get_image_size_for_max_height_width(
 
 
 # Copied from transformers.models.detr.image_processing_detr.safe_squeeze
-def safe_squeeze(arr: np.ndarray, axis:
+def safe_squeeze(arr: np.ndarray, axis: int | None = None) -> np.ndarray:
     """
     Squeezes an array, but only if the axis specified has dim 1.
     """
@@ -219,7 +217,7 @@ def max_across_indices(values: Iterable[Any]) -> list[Any]:
 
 # Copied from transformers.models.detr.image_processing_detr.get_max_height_width
 def get_max_height_width(
-    images: list[np.ndarray], input_data_format:
+    images: list[np.ndarray], input_data_format: str | ChannelDimension | None = None
 ) -> list[int]:
     """
     Get the maximum height and width across all images in a batch.
@@ -238,7 +236,7 @@ def get_max_height_width(
 
 # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
 def make_pixel_mask(
-    image: np.ndarray, output_size: tuple[int, int], input_data_format:
+    image: np.ndarray, output_size: tuple[int, int], input_data_format: str | ChannelDimension | None = None
 ) -> np.ndarray:
     """
     Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
@@ -295,7 +293,7 @@ def prepare_coco_detection_annotation(
     image,
     target,
     return_segmentation_masks: bool = False,
-    input_data_format:
+    input_data_format: ChannelDimension | str | None = None,
 ):
     """
     Convert the target in COCO format into the format expected by GroundingDino.
@@ -390,9 +388,9 @@ def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
 def prepare_coco_panoptic_annotation(
     image: np.ndarray,
     target: dict,
-    masks_path:
+    masks_path: str | pathlib.Path,
     return_masks: bool = True,
-    input_data_format:
+    input_data_format: ChannelDimension | str = None,
 ) -> dict:
     """
     Prepare a coco panoptic annotation for GroundingDino.
@@ -711,8 +709,8 @@ def compute_segments(
     pred_labels,
     mask_threshold: float = 0.5,
     overlap_mask_area_threshold: float = 0.8,
-    label_ids_to_fuse:
-    target_size:
+    label_ids_to_fuse: set[int] | None = None,
+    target_size: tuple[int, int] | None = None,
 ):
     height = mask_probs.shape[1] if target_size is None else target_size[0]
     width = mask_probs.shape[2] if target_size is None else target_size[1]
@@ -854,18 +852,18 @@ class GroundingDinoImageProcessor(BaseImageProcessor):
     # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__
     def __init__(
         self,
-        format:
+        format: str | AnnotationFormat = AnnotationFormat.COCO_DETECTION,
         do_resize: bool = True,
-        size:
+        size: dict[str, int] | None = None,
         resample: PILImageResampling = PILImageResampling.BILINEAR,
         do_rescale: bool = True,
-        rescale_factor:
+        rescale_factor: int | float = 1 / 255,
         do_normalize: bool = True,
-        image_mean:
-        image_std:
-        do_convert_annotations:
+        image_mean: float | list[float] | None = None,
+        image_std: float | list[float] | None = None,
+        do_convert_annotations: bool | None = None,
         do_pad: bool = True,
-        pad_size:
+        pad_size: dict[str, int] | None = None,
         **kwargs,
     ) -> None:
         max_size = None if size is None else kwargs.pop("max_size", 1333)
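At the call site nothing changes with the retyped defaults; a hedged usage sketch (the `size` keys follow the DETR-style convention this processor is copied from, and all values here are illustrative):

```python
import numpy as np
from transformers import GroundingDinoImageProcessor

processor = GroundingDinoImageProcessor(
    do_resize=True,
    size={"shortest_edge": 800, "longest_edge": 1333},  # illustrative values
)
image = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy HWC image
batch = processor.preprocess(images=image, return_tensors="np")
print(batch["pixel_values"].shape)
```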
@@ -916,10 +914,10 @@ class GroundingDinoImageProcessor(BaseImageProcessor):
         self,
         image: np.ndarray,
         target: dict,
-        format: Optional[AnnotationFormat] = None,
-        return_segmentation_masks: Optional[bool] = None,
-        masks_path: Optional[Union[str, pathlib.Path]] = None,
-        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        format: AnnotationFormat | None = None,
+        return_segmentation_masks: bool | None = None,
+        masks_path: str | pathlib.Path | None = None,
+        input_data_format: str | ChannelDimension | None = None,
     ) -> dict:
         """
         Prepare an annotation for feeding into GroundingDino model.
@@ -950,8 +948,8 @@ class GroundingDinoImageProcessor(BaseImageProcessor):
         image: np.ndarray,
         size: dict[str, int],
         resample: PILImageResampling = PILImageResampling.BILINEAR,
-        data_format: Optional[ChannelDimension] = None,
-        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        data_format: ChannelDimension | None = None,
+        input_data_format: str | ChannelDimension | None = None,
         **kwargs,
     ) -> np.ndarray:
         """
@@ -1024,8 +1022,8 @@ class GroundingDinoImageProcessor(BaseImageProcessor):
         self,
         image: np.ndarray,
         rescale_factor: float,
-        data_format: Optional[Union[str, ChannelDimension]] = None,
-        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        data_format: str | ChannelDimension | None = None,
+        input_data_format: str | ChannelDimension | None = None,
     ) -> np.ndarray:
         """
         Rescale the image by the given factor. image = image * rescale_factor.
@@ -1105,10 +1103,10 @@ class GroundingDinoImageProcessor(BaseImageProcessor):
         self,
         image: np.ndarray,
         output_size: tuple[int, int],
-        annotation: Optional[dict[str, Any]] = None,
-        constant_values: Union[float, Iterable[float]] = 0,
-        data_format: Optional[ChannelDimension] = None,
-        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        annotation: dict[str, Any] | None = None,
+        constant_values: float | Iterable[float] = 0,
+        data_format: ChannelDimension | None = None,
+        input_data_format: str | ChannelDimension | None = None,
         update_bboxes: bool = True,
     ) -> np.ndarray:
         """
@@ -1138,14 +1136,14 @@ class GroundingDinoImageProcessor(BaseImageProcessor):
     def pad(
         self,
         images: list[np.ndarray],
-        annotations: Optional[Union[AnnotationType, list[AnnotationType]]] = None,
-        constant_values: Union[float, Iterable[float]] = 0,
+        annotations: AnnotationType | list[AnnotationType] | None = None,
+        constant_values: float | Iterable[float] = 0,
         return_pixel_mask: bool = True,
-        return_tensors: Optional[Union[str, TensorType]] = None,
-        data_format: Optional[ChannelDimension] = None,
-        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        return_tensors: str | TensorType | None = None,
+        data_format: ChannelDimension | None = None,
+        input_data_format: str | ChannelDimension | None = None,
         update_bboxes: bool = True,
-        pad_size: Optional[dict[str, int]] = None,
+        pad_size: dict[str, int] | None = None,
     ) -> BatchFeature:
         """
         Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
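A hedged usage sketch for `pad`: it grows every image to the batch's maximum height/width (bottom/right padding) and can return the matching pixel masks. The shapes in the comments are what the signature and docstring imply, not verified output:

```python
import numpy as np
from transformers import GroundingDinoImageProcessor

processor = GroundingDinoImageProcessor()
images = [np.random.rand(3, 480, 640), np.random.rand(3, 512, 512)]  # channels-first

batch = processor.pad(
    images,
    return_pixel_mask=True,
    return_tensors="np",
    input_data_format="channels_first",
)
print(batch["pixel_values"].shape)  # expected (2, 3, 512, 640): max height x max width
print(batch["pixel_mask"].shape)    # expected (2, 512, 640)
```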
@@ -1222,24 +1220,24 @@ class GroundingDinoImageProcessor(BaseImageProcessor):
     def preprocess(
         self,
         images: ImageInput,
-        annotations: Optional[Union[AnnotationType, list[AnnotationType]]] = None,
-        return_segmentation_masks: Optional[bool] = None,
-        masks_path: Optional[Union[str, pathlib.Path]] = None,
-        do_resize: Optional[bool] = None,
-        size: Optional[dict[str, int]] = None,
+        annotations: AnnotationType | list[AnnotationType] | None = None,
+        return_segmentation_masks: bool | None = None,
+        masks_path: str | pathlib.Path | None = None,
+        do_resize: bool | None = None,
+        size: dict[str, int] | None = None,
         resample=None,  # PILImageResampling
-        do_rescale: Optional[bool] = None,
-        rescale_factor: Optional[Union[int, float]] = None,
-        do_normalize: Optional[bool] = None,
-        do_convert_annotations: Optional[bool] = None,
-        image_mean: Optional[Union[float, list[float]]] = None,
-        image_std: Optional[Union[float, list[float]]] = None,
-        do_pad: Optional[bool] = None,
-        format: Optional[Union[str, AnnotationFormat]] = None,
-        return_tensors: Optional[Union[TensorType, str]] = None,
-        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
-        input_data_format: Optional[Union[str, ChannelDimension]] = None,
-        pad_size: Optional[dict[str, int]] = None,
+        do_rescale: bool | None = None,
+        rescale_factor: int | float | None = None,
+        do_normalize: bool | None = None,
+        do_convert_annotations: bool | None = None,
+        image_mean: float | list[float] | None = None,
+        image_std: float | list[float] | None = None,
+        do_pad: bool | None = None,
+        format: str | AnnotationFormat | None = None,
+        return_tensors: TensorType | str | None = None,
+        data_format: str | ChannelDimension = ChannelDimension.FIRST,
+        input_data_format: str | ChannelDimension | None = None,
+        pad_size: dict[str, int] | None = None,
         **kwargs,
     ) -> BatchFeature:
         """
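End to end, `preprocess` accepts images plus optional COCO-style annotations and returns a `BatchFeature`. A sketch assuming one image and one box in COCO `[x, y, width, height]` form (key names follow the COCO detection convention this family of processors documents):

```python
import numpy as np
from PIL import Image
from transformers import GroundingDinoImageProcessor

processor = GroundingDinoImageProcessor()
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))

annotations = {
    "image_id": 0,
    "annotations": [{"bbox": [10, 20, 100, 50], "category_id": 1, "area": 5000.0, "iscrowd": 0}],
}

encoding = processor.preprocess(images=image, annotations=annotations, return_tensors="pt")
print(encoding["pixel_values"].shape)  # (1, 3, height, width) after resize/pad
print(encoding["labels"][0]["boxes"])  # boxes converted for the model by the annotation step
```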
@@ -1472,7 +1470,7 @@ class GroundingDinoImageProcessor(BaseImageProcessor):
         self,
         outputs: "GroundingDinoObjectDetectionOutput",
         threshold: float = 0.1,
-        target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
+        target_sizes: TensorType | list[tuple] | None = None,
     ):
         """
         Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
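`target_sizes` is what lets post-processing map the model's normalized center-format boxes back to absolute pixel corners. A self-contained sketch of that conversion (the standard DETR-style convention this family uses, not the library method itself):

```python
import torch

def rescale_boxes(boxes_cxcywh: torch.Tensor, target_size: tuple[int, int]) -> torch.Tensor:
    # Normalized (cx, cy, w, h) -> absolute (x0, y0, x1, y1) in pixels.
    height, width = target_size
    cx, cy, w, h = boxes_cxcywh.unbind(-1)
    corners = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
    return corners * torch.tensor([width, height, width, height], dtype=corners.dtype)

box = torch.tensor([[0.5, 0.5, 0.2, 0.4]])  # centered box, 20% wide x 40% tall
print(rescale_boxes(box, (480, 640)))       # tensor([[256., 144., 384., 336.]])
```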
transformers/models/grounding_dino/image_processing_grounding_dino_fast.py
@@ -4,7 +4,6 @@
 # the file from the modular. If any change should be done, please apply the change to the
 # modular_grounding_dino.py file directly. One of our CI enforces this.
 # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
-# coding=utf-8
 # Copyright 2025 the HuggingFace Inc. team. All rights reserved.
 #
 # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
@@ -25,7 +24,7 @@
 # limitations under the License.

 import pathlib
-from typing import TYPE_CHECKING, Any, Optional, Union
+from typing import TYPE_CHECKING, Any, Optional

 import torch
 from torchvision.io import read_image
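The imports confirm the fast processor works on `torch.Tensor` images, e.g. as decoded by torchvision. A small sketch (the file path is a placeholder):

```python
from torchvision.io import read_image

# read_image decodes to a uint8 tensor in channels-first (C, H, W) layout,
# which is the format the fast image processor consumes.
image = read_image("example.jpg")  # placeholder path
print(image.dtype, image.shape)    # torch.uint8, torch.Size([3, H, W])
```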
@@ -102,7 +101,7 @@ def prepare_coco_detection_annotation(
     image,
     target,
     return_segmentation_masks: bool = False,
-    input_data_format: Optional[Union[ChannelDimension, str]] = None,
+    input_data_format: ChannelDimension | str | None = None,
 ):
     """
     Convert the target in COCO format into the format expected by GROUNDING_DINO.
@@ -213,9 +212,9 @@ def rgb_to_id(color):
 def prepare_coco_panoptic_annotation(
     image: torch.Tensor,
     target: dict,
-    masks_path: Union[str, pathlib.Path],
+    masks_path: str | pathlib.Path,
     return_masks: bool = True,
-    input_data_format: Union[ChannelDimension, str] = None,
+    input_data_format: ChannelDimension | str = None,
 ) -> dict:
     """
     Prepare a coco panoptic annotation for GROUNDING_DINO.
@@ -323,10 +322,10 @@ class GroundingDinoImageProcessorFast(BaseImageProcessorFast):
         self,
         image: torch.Tensor,
         target: dict,
-        format: Optional[AnnotationFormat] = None,
-        return_segmentation_masks: Optional[bool] = None,
-        masks_path: Optional[Union[str, pathlib.Path]] = None,
-        input_data_format: Optional[Union[str, ChannelDimension]] = None,
+        format: AnnotationFormat | None = None,
+        return_segmentation_masks: bool | None = None,
+        masks_path: str | pathlib.Path | None = None,
+        input_data_format: str | ChannelDimension | None = None,
     ) -> dict:
         """
         Prepare an annotation for feeding into GROUNDING_DINO model.
@@ -512,7 +511,7 @@ class GroundingDinoImageProcessorFast(BaseImageProcessorFast):
         self,
         image: torch.Tensor,
         padded_size: tuple[int, int],
-        annotation: Optional[dict[str, Any]] = None,
+        annotation: dict[str, Any] | None = None,
         update_bboxes: bool = True,
         fill: int = 0,
     ):
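The fast path pads one image to `padded_size` with a constant `fill`. A self-contained sketch of that operation with `torch.nn.functional.pad` (bottom/right padding, matching the slow processor's documented behavior):

```python
import torch
import torch.nn.functional as F

def pad_to_size(image: torch.Tensor, padded_size: tuple[int, int], fill: int = 0) -> torch.Tensor:
    # image is (C, H, W); pad only on the bottom and right, like the processors do.
    pad_bottom = padded_size[0] - image.shape[1]
    pad_right = padded_size[1] - image.shape[2]
    return F.pad(image, (0, pad_right, 0, pad_bottom), value=fill)

img = torch.ones(3, 480, 640)
print(pad_to_size(img, (512, 640)).shape)  # torch.Size([3, 512, 640])
```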
@@ -541,8 +540,8 @@ class GroundingDinoImageProcessorFast(BaseImageProcessorFast):
     def _preprocess(
         self,
         images: list["torch.Tensor"],
-        annotations: Optional[Union[AnnotationType, list[AnnotationType]]],
-        masks_path: Optional[Union[str, pathlib.Path]],
+        annotations: AnnotationType | list[AnnotationType] | None,
+        masks_path: str | pathlib.Path | None,
         return_segmentation_masks: bool,
         do_resize: bool,
         size: SizeDict,
@@ -551,12 +550,12 @@ class GroundingDinoImageProcessorFast(BaseImageProcessorFast):
         rescale_factor: float,
         do_normalize: bool,
         do_convert_annotations: bool,
-        image_mean: Optional[Union[float, list[float]]],
-        image_std: Optional[Union[float, list[float]]],
+        image_mean: float | list[float] | None,
+        image_std: float | list[float] | None,
         do_pad: bool,
-        pad_size: Optional[SizeDict],
-        format: Optional[Union[str, AnnotationFormat]],
-        return_tensors: Optional[Union[str, TensorType]],
+        pad_size: SizeDict | None,
+        format: str | AnnotationFormat | None,
+        return_tensors: str | TensorType | None,
         **kwargs,
     ) -> BatchFeature:
         """
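`_preprocess` is the internal batched path; the public entry point is calling the processor itself. A hedged usage sketch with random uint8 tensors (the exact output shape depends on the resize defaults, so it is printed rather than asserted):

```python
import torch
from transformers import GroundingDinoImageProcessorFast

processor = GroundingDinoImageProcessorFast()
images = [torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8) for _ in range(2)]

batch = processor(images=images, return_tensors="pt")
print(batch["pixel_values"].shape, batch["pixel_values"].dtype)
```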
@@ -658,7 +657,7 @@ class GroundingDinoImageProcessorFast(BaseImageProcessorFast):
         self,
         outputs: "GroundingDinoObjectDetectionOutput",
         threshold: float = 0.1,
-        target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
+        target_sizes: TensorType | list[tuple] | None = None,
     ):
         """
         Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,