transformers-5.0.0rc0-py3-none-any.whl → transformers-5.0.0rc1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- transformers/__init__.py +30 -3
- transformers/cli/serve.py +47 -17
- transformers/conversion_mapping.py +15 -2
- transformers/convert_slow_tokenizer.py +225 -10
- transformers/core_model_loading.py +196 -135
- transformers/data/data_collator.py +12 -4
- transformers/dependency_versions_table.py +1 -2
- transformers/dynamic_module_utils.py +1 -2
- transformers/feature_extraction_utils.py +1 -2
- transformers/file_utils.py +0 -1
- transformers/generation/__init__.py +11 -1
- transformers/generation/configuration_utils.py +3 -2
- transformers/generation/continuous_batching/__init__.py +4 -0
- transformers/generation/continuous_batching/continuous_api.py +134 -79
- transformers/image_processing_base.py +1 -2
- transformers/integrations/__init__.py +4 -2
- transformers/integrations/accelerate.py +15 -3
- transformers/integrations/aqlm.py +38 -66
- transformers/integrations/awq.py +48 -514
- transformers/integrations/bitnet.py +45 -100
- transformers/integrations/bitsandbytes.py +79 -191
- transformers/integrations/deepspeed.py +1 -0
- transformers/integrations/eetq.py +84 -79
- transformers/integrations/fbgemm_fp8.py +191 -145
- transformers/integrations/finegrained_fp8.py +236 -193
- transformers/integrations/fp_quant.py +92 -0
- transformers/integrations/ggml.py +11 -1
- transformers/integrations/higgs.py +40 -62
- transformers/integrations/hub_kernels.py +42 -3
- transformers/integrations/integration_utils.py +10 -0
- transformers/integrations/mxfp4.py +25 -65
- transformers/integrations/peft.py +7 -29
- transformers/integrations/quanto.py +73 -55
- transformers/integrations/quark.py +55 -0
- transformers/integrations/spqr.py +44 -90
- transformers/integrations/torchao.py +32 -38
- transformers/integrations/vptq.py +42 -59
- transformers/modelcard.py +1 -2
- transformers/modeling_gguf_pytorch_utils.py +8 -0
- transformers/modeling_rope_utils.py +30 -6
- transformers/modeling_utils.py +116 -112
- transformers/models/__init__.py +3 -0
- transformers/models/afmoe/modeling_afmoe.py +4 -4
- transformers/models/albert/tokenization_albert.py +6 -12
- transformers/models/align/modeling_align.py +2 -0
- transformers/models/altclip/modeling_altclip.py +4 -0
- transformers/models/apertus/modeling_apertus.py +4 -4
- transformers/models/arcee/modeling_arcee.py +4 -4
- transformers/models/aria/modeling_aria.py +4 -4
- transformers/models/audioflamingo3/modeling_audioflamingo3.py +1 -0
- transformers/models/audioflamingo3/modular_audioflamingo3.py +1 -0
- transformers/models/auto/configuration_auto.py +11 -0
- transformers/models/auto/feature_extraction_auto.py +2 -0
- transformers/models/auto/image_processing_auto.py +1 -0
- transformers/models/auto/modeling_auto.py +6 -0
- transformers/models/auto/processing_auto.py +18 -10
- transformers/models/auto/tokenization_auto.py +74 -472
- transformers/models/autoformer/modeling_autoformer.py +4 -0
- transformers/models/bamba/modeling_bamba.py +4 -3
- transformers/models/bark/modeling_bark.py +2 -0
- transformers/models/bart/modeling_bart.py +7 -0
- transformers/models/barthez/tokenization_barthez.py +5 -10
- transformers/models/beit/modeling_beit.py +6 -1
- transformers/models/bert/tokenization_bert.py +8 -21
- transformers/models/big_bird/modeling_big_bird.py +6 -0
- transformers/models/big_bird/tokenization_big_bird.py +18 -42
- transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +8 -2
- transformers/models/biogpt/modeling_biogpt.py +2 -0
- transformers/models/biogpt/modular_biogpt.py +2 -0
- transformers/models/bit/modeling_bit.py +11 -2
- transformers/models/bitnet/modeling_bitnet.py +4 -4
- transformers/models/blenderbot/modeling_blenderbot.py +5 -0
- transformers/models/blenderbot/tokenization_blenderbot.py +12 -16
- transformers/models/blenderbot_small/modeling_blenderbot_small.py +5 -0
- transformers/models/blip/modeling_blip_text.py +2 -0
- transformers/models/blip_2/modeling_blip_2.py +2 -1
- transformers/models/bloom/modeling_bloom.py +4 -0
- transformers/models/blt/modeling_blt.py +2 -2
- transformers/models/blt/modular_blt.py +2 -2
- transformers/models/bridgetower/modeling_bridgetower.py +5 -1
- transformers/models/bros/modeling_bros.py +4 -0
- transformers/models/camembert/tokenization_camembert.py +8 -12
- transformers/models/canine/modeling_canine.py +5 -0
- transformers/models/chameleon/modeling_chameleon.py +2 -1
- transformers/models/chinese_clip/modeling_chinese_clip.py +3 -0
- transformers/models/clap/modeling_clap.py +5 -0
- transformers/models/clip/tokenization_clip.py +22 -44
- transformers/models/clipseg/modeling_clipseg.py +5 -0
- transformers/models/clvp/modeling_clvp.py +5 -0
- transformers/models/clvp/tokenization_clvp.py +1 -63
- transformers/models/code_llama/tokenization_code_llama.py +20 -43
- transformers/models/codegen/tokenization_codegen.py +14 -43
- transformers/models/cohere/modeling_cohere.py +4 -3
- transformers/models/cohere/modular_cohere.py +2 -1
- transformers/models/cohere/tokenization_cohere.py +12 -42
- transformers/models/cohere2/modeling_cohere2.py +7 -6
- transformers/models/cohere2/modular_cohere2.py +5 -5
- transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +4 -3
- transformers/models/cohere2_vision/modular_cohere2_vision.py +4 -3
- transformers/models/colqwen2/modeling_colqwen2.py +1 -0
- transformers/models/colqwen2/modular_colqwen2.py +1 -0
- transformers/models/conditional_detr/modeling_conditional_detr.py +5 -0
- transformers/models/convbert/modeling_convbert.py +6 -0
- transformers/models/convnext/modeling_convnext.py +2 -4
- transformers/models/convnextv2/modeling_convnextv2.py +2 -4
- transformers/models/csm/modeling_csm.py +4 -3
- transformers/models/ctrl/modeling_ctrl.py +1 -0
- transformers/models/cvt/modeling_cvt.py +2 -0
- transformers/models/cwm/modeling_cwm.py +4 -4
- transformers/models/d_fine/modeling_d_fine.py +2 -0
- transformers/models/d_fine/modular_d_fine.py +1 -0
- transformers/models/dab_detr/modeling_dab_detr.py +4 -0
- transformers/models/dac/modeling_dac.py +2 -2
- transformers/models/data2vec/modeling_data2vec_audio.py +5 -0
- transformers/models/data2vec/modeling_data2vec_vision.py +4 -1
- transformers/models/dbrx/modeling_dbrx.py +2 -2
- transformers/models/deberta/modeling_deberta.py +5 -0
- transformers/models/deberta/tokenization_deberta.py +11 -20
- transformers/models/deberta_v2/modeling_deberta_v2.py +6 -0
- transformers/models/deberta_v2/tokenization_deberta_v2.py +13 -28
- transformers/models/decision_transformer/modeling_decision_transformer.py +4 -1
- transformers/models/deepseek_v2/modeling_deepseek_v2.py +2 -3
- transformers/models/deepseek_v2/modular_deepseek_v2.py +2 -2
- transformers/models/deepseek_v3/modeling_deepseek_v3.py +3 -2
- transformers/models/deepseek_v3/modular_deepseek_v3.py +1 -0
- transformers/models/deformable_detr/modeling_deformable_detr.py +4 -0
- transformers/models/depth_anything/modeling_depth_anything.py +1 -0
- transformers/models/depth_pro/modeling_depth_pro.py +2 -0
- transformers/models/detr/modeling_detr.py +5 -0
- transformers/models/dia/modeling_dia.py +4 -3
- transformers/models/dia/modular_dia.py +0 -1
- transformers/models/diffllama/modeling_diffllama.py +2 -2
- transformers/models/dinat/modeling_dinat.py +3 -0
- transformers/models/dinov3_convnext/modeling_dinov3_convnext.py +1 -1
- transformers/models/dinov3_vit/modeling_dinov3_vit.py +2 -2
- transformers/models/dinov3_vit/modular_dinov3_vit.py +2 -2
- transformers/models/distilbert/tokenization_distilbert.py +13 -0
- transformers/models/doge/modeling_doge.py +2 -3
- transformers/models/doge/modular_doge.py +0 -1
- transformers/models/donut/modeling_donut_swin.py +2 -0
- transformers/models/dots1/modeling_dots1.py +10 -7
- transformers/models/dots1/modular_dots1.py +5 -3
- transformers/models/dpr/modeling_dpr.py +5 -0
- transformers/models/dpr/tokenization_dpr.py +12 -0
- transformers/models/edgetam/modeling_edgetam.py +1 -1
- transformers/models/edgetam_video/modeling_edgetam_video.py +1 -0
- transformers/models/edgetam_video/modular_edgetam_video.py +1 -0
- transformers/models/efficientloftr/modeling_efficientloftr.py +2 -2
- transformers/models/efficientnet/modeling_efficientnet.py +2 -0
- transformers/models/emu3/modeling_emu3.py +4 -4
- transformers/models/eomt/image_processing_eomt.py +13 -1
- transformers/models/eomt/image_processing_eomt_fast.py +14 -2
- transformers/models/ernie4_5/modeling_ernie4_5.py +4 -4
- transformers/models/ernie4_5/modular_ernie4_5.py +2 -1
- transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +5 -5
- transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py +2 -2
- transformers/models/esm/modeling_esmfold.py +5 -4
- transformers/models/evolla/modeling_evolla.py +4 -4
- transformers/models/exaone4/modeling_exaone4.py +2 -2
- transformers/models/exaone4/modular_exaone4.py +0 -1
- transformers/models/falcon/modeling_falcon.py +6 -1
- transformers/models/falcon_h1/modeling_falcon_h1.py +4 -3
- transformers/models/falcon_mamba/modeling_falcon_mamba.py +25 -35
- transformers/models/falcon_mamba/modular_falcon_mamba.py +12 -31
- transformers/{kernels/falcon_mamba → models/fast_vlm}/__init__.py +15 -3
- transformers/models/fast_vlm/configuration_fast_vlm.py +137 -0
- transformers/models/fast_vlm/modeling_fast_vlm.py +455 -0
- transformers/models/fast_vlm/modular_fast_vlm.py +273 -0
- transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +8 -3
- transformers/models/flaubert/modeling_flaubert.py +7 -0
- transformers/models/flava/modeling_flava.py +6 -1
- transformers/models/flex_olmo/modeling_flex_olmo.py +4 -5
- transformers/models/florence2/modeling_florence2.py +2 -1
- transformers/models/florence2/modular_florence2.py +2 -1
- transformers/models/fnet/modeling_fnet.py +7 -0
- transformers/models/focalnet/modeling_focalnet.py +4 -0
- transformers/models/fsmt/modeling_fsmt.py +2 -0
- transformers/models/funnel/modeling_funnel.py +8 -0
- transformers/models/funnel/tokenization_funnel.py +17 -24
- transformers/models/fuyu/processing_fuyu.py +3 -3
- transformers/models/gemma/modeling_gemma.py +4 -4
- transformers/models/gemma/tokenization_gemma.py +10 -27
- transformers/models/gemma2/modeling_gemma2.py +4 -4
- transformers/models/gemma2/modular_gemma2.py +2 -1
- transformers/models/gemma3/modeling_gemma3.py +14 -84
- transformers/models/gemma3/modular_gemma3.py +12 -81
- transformers/models/gemma3n/modeling_gemma3n.py +18 -209
- transformers/models/gemma3n/modular_gemma3n.py +17 -59
- transformers/models/git/modeling_git.py +2 -0
- transformers/models/glm/modeling_glm.py +4 -4
- transformers/models/glm4/modeling_glm4.py +4 -4
- transformers/models/glm4_moe/modeling_glm4_moe.py +5 -3
- transformers/models/glm4v/configuration_glm4v.py +3 -1
- transformers/models/glm4v/modeling_glm4v.py +3 -3
- transformers/models/glm4v/modular_glm4v.py +6 -4
- transformers/models/glm4v_moe/configuration_glm4v_moe.py +3 -1
- transformers/models/glm4v_moe/modeling_glm4v_moe.py +6 -5
- transformers/models/glm4v_moe/modular_glm4v_moe.py +1 -1
- transformers/models/glpn/modeling_glpn.py +2 -0
- transformers/models/gpt2/modeling_gpt2.py +5 -1
- transformers/models/gpt2/tokenization_gpt2.py +16 -44
- transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +1 -0
- transformers/models/gpt_neo/modeling_gpt_neo.py +4 -0
- transformers/models/gpt_neox/modeling_gpt_neox.py +5 -2
- transformers/models/gpt_neox/modular_gpt_neox.py +3 -0
- transformers/models/gpt_neox/tokenization_gpt_neox.py +10 -49
- transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +3 -1
- transformers/models/gpt_oss/modeling_gpt_oss.py +5 -6
- transformers/models/gpt_oss/modular_gpt_oss.py +3 -5
- transformers/models/gptj/modeling_gptj.py +3 -0
- transformers/models/granite/modeling_granite.py +4 -4
- transformers/models/granitemoe/modeling_granitemoe.py +4 -6
- transformers/models/granitemoe/modular_granitemoe.py +0 -2
- transformers/models/granitemoehybrid/modeling_granitemoehybrid.py +4 -6
- transformers/models/granitemoeshared/modeling_granitemoeshared.py +4 -6
- transformers/models/grounding_dino/modeling_grounding_dino.py +4 -0
- transformers/models/groupvit/modeling_groupvit.py +3 -0
- transformers/models/helium/modeling_helium.py +4 -3
- transformers/models/herbert/tokenization_herbert.py +9 -25
- transformers/models/hgnet_v2/modeling_hgnet_v2.py +6 -1
- transformers/models/hgnet_v2/modular_hgnet_v2.py +6 -1
- transformers/models/hiera/modeling_hiera.py +4 -0
- transformers/models/hubert/modeling_hubert.py +3 -0
- transformers/models/hubert/modular_hubert.py +1 -0
- transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py +4 -4
- transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py +4 -4
- transformers/models/ibert/modeling_ibert.py +6 -0
- transformers/models/idefics/modeling_idefics.py +5 -21
- transformers/models/imagegpt/modeling_imagegpt.py +2 -1
- transformers/models/informer/modeling_informer.py +4 -0
- transformers/models/informer/modular_informer.py +1 -0
- transformers/models/internvl/modeling_internvl.py +2 -4
- transformers/models/internvl/modular_internvl.py +2 -4
- transformers/models/jamba/modeling_jamba.py +2 -2
- transformers/models/janus/modeling_janus.py +1 -0
- transformers/models/janus/modular_janus.py +1 -0
- transformers/models/jetmoe/modeling_jetmoe.py +2 -2
- transformers/models/kosmos2/modeling_kosmos2.py +1 -0
- transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +3 -1
- transformers/models/lasr/__init__.py +29 -0
- transformers/models/lasr/configuration_lasr.py +244 -0
- transformers/models/lasr/feature_extraction_lasr.py +277 -0
- transformers/models/lasr/modeling_lasr.py +729 -0
- transformers/models/lasr/modular_lasr.py +569 -0
- transformers/models/lasr/processing_lasr.py +96 -0
- transformers/models/lasr/tokenization_lasr.py +186 -0
- transformers/models/layoutlm/modeling_layoutlm.py +5 -0
- transformers/models/layoutlmv2/modeling_layoutlmv2.py +4 -0
- transformers/models/layoutlmv2/tokenization_layoutlmv2.py +10 -53
- transformers/models/layoutlmv3/modeling_layoutlmv3.py +4 -0
- transformers/models/layoutlmv3/tokenization_layoutlmv3.py +12 -61
- transformers/models/layoutxlm/tokenization_layoutxlm.py +13 -38
- transformers/models/led/modeling_led.py +6 -0
- transformers/models/levit/modeling_levit.py +3 -0
- transformers/models/lfm2/modeling_lfm2.py +4 -5
- transformers/models/lfm2/modular_lfm2.py +0 -1
- transformers/models/lfm2_moe/modeling_lfm2_moe.py +4 -5
- transformers/models/lightglue/modeling_lightglue.py +3 -1
- transformers/models/lightglue/modular_lightglue.py +1 -0
- transformers/models/lilt/modeling_lilt.py +4 -0
- transformers/models/llama/modeling_llama.py +4 -4
- transformers/models/llama/tokenization_llama.py +15 -43
- transformers/models/llama4/modeling_llama4.py +3 -2
- transformers/models/longcat_flash/modeling_longcat_flash.py +4 -4
- transformers/models/longcat_flash/modular_longcat_flash.py +2 -2
- transformers/models/longformer/modeling_longformer.py +6 -0
- transformers/models/longt5/modeling_longt5.py +4 -0
- transformers/models/luke/modeling_luke.py +9 -0
- transformers/models/luke/tokenization_luke.py +11 -38
- transformers/models/lxmert/modeling_lxmert.py +2 -0
- transformers/models/m2m_100/modeling_m2m_100.py +4 -0
- transformers/models/mamba/modeling_mamba.py +14 -22
- transformers/models/marian/modeling_marian.py +5 -0
- transformers/models/markuplm/modeling_markuplm.py +4 -0
- transformers/models/markuplm/tokenization_markuplm.py +28 -61
- transformers/models/mask2former/modeling_mask2former.py +2 -0
- transformers/models/maskformer/modeling_maskformer.py +2 -0
- transformers/models/maskformer/modeling_maskformer_swin.py +2 -0
- transformers/models/mbart/modeling_mbart.py +7 -0
- transformers/models/mbart/tokenization_mbart.py +11 -52
- transformers/models/mbart50/tokenization_mbart50.py +7 -10
- transformers/models/megatron_bert/modeling_megatron_bert.py +7 -0
- transformers/models/mgp_str/modeling_mgp_str.py +2 -0
- transformers/models/mimi/modeling_mimi.py +3 -1
- transformers/models/minimax/modeling_minimax.py +4 -4
- transformers/models/ministral/modeling_ministral.py +4 -4
- transformers/models/ministral3/configuration_ministral3.py +1 -1
- transformers/models/ministral3/modeling_ministral3.py +4 -3
- transformers/models/mistral/modeling_mistral.py +4 -3
- transformers/models/mixtral/modeling_mixtral.py +4 -4
- transformers/models/mllama/modeling_mllama.py +2 -2
- transformers/models/mluke/tokenization_mluke.py +6 -6
- transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +4 -0
- transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +2 -0
- transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +3 -0
- transformers/models/mobilevit/modeling_mobilevit.py +3 -0
- transformers/models/mobilevitv2/modeling_mobilevitv2.py +3 -0
- transformers/models/modernbert/modeling_modernbert.py +4 -1
- transformers/models/modernbert/modular_modernbert.py +2 -0
- transformers/models/modernbert_decoder/modeling_modernbert_decoder.py +8 -9
- transformers/models/modernbert_decoder/modular_modernbert_decoder.py +6 -7
- transformers/models/moonshine/modeling_moonshine.py +4 -2
- transformers/models/moshi/modeling_moshi.py +5 -2
- transformers/models/mpnet/modeling_mpnet.py +5 -0
- transformers/models/mpnet/tokenization_mpnet.py +5 -13
- transformers/models/mpt/modeling_mpt.py +2 -0
- transformers/models/mra/modeling_mra.py +6 -0
- transformers/models/mt5/modeling_mt5.py +7 -0
- transformers/models/musicgen/modeling_musicgen.py +2 -0
- transformers/models/musicgen_melody/modeling_musicgen_melody.py +3 -0
- transformers/models/mvp/modeling_mvp.py +7 -0
- transformers/models/nanochat/modeling_nanochat.py +4 -4
- transformers/models/nemotron/modeling_nemotron.py +4 -2
- transformers/models/nllb/tokenization_nllb.py +8 -22
- transformers/models/nougat/tokenization_nougat.py +11 -59
- transformers/models/nystromformer/modeling_nystromformer.py +6 -0
- transformers/models/olmo/modeling_olmo.py +4 -4
- transformers/models/olmo/modular_olmo.py +2 -2
- transformers/models/olmo2/modeling_olmo2.py +4 -5
- transformers/models/olmo2/modular_olmo2.py +0 -1
- transformers/models/olmo3/modeling_olmo3.py +4 -4
- transformers/models/olmoe/modeling_olmoe.py +4 -4
- transformers/models/omdet_turbo/modeling_omdet_turbo.py +2 -0
- transformers/models/oneformer/modeling_oneformer.py +4 -1
- transformers/models/openai/modeling_openai.py +3 -0
- transformers/models/openai/tokenization_openai.py +10 -46
- transformers/models/opt/modeling_opt.py +2 -0
- transformers/models/owlv2/modeling_owlv2.py +4 -0
- transformers/models/owlvit/modeling_owlvit.py +4 -0
- transformers/models/paddleocr_vl/__init__.py +32 -0
- transformers/models/paddleocr_vl/configuration_paddleocr_vl.py +336 -0
- transformers/models/paddleocr_vl/image_processing_paddleocr_vl.py +503 -0
- transformers/models/paddleocr_vl/image_processing_paddleocr_vl_fast.py +209 -0
- transformers/models/paddleocr_vl/modeling_paddleocr_vl.py +1668 -0
- transformers/models/paddleocr_vl/modular_paddleocr_vl.py +1349 -0
- transformers/models/paddleocr_vl/processing_paddleocr_vl.py +135 -0
- transformers/models/parakeet/configuration_parakeet.py +4 -6
- transformers/models/parakeet/modeling_parakeet.py +9 -6
- transformers/models/parakeet/modular_parakeet.py +2 -2
- transformers/models/parakeet/processing_parakeet.py +1 -0
- transformers/models/patchtsmixer/modeling_patchtsmixer.py +6 -0
- transformers/models/patchtst/modeling_patchtst.py +20 -2
- transformers/models/pegasus/modeling_pegasus.py +5 -0
- transformers/models/pegasus/tokenization_pegasus.py +17 -44
- transformers/models/pegasus_x/modeling_pegasus_x.py +4 -0
- transformers/models/perceiver/modeling_perceiver.py +8 -0
- transformers/models/persimmon/modeling_persimmon.py +2 -1
- transformers/models/phi/modeling_phi.py +4 -5
- transformers/models/phi/modular_phi.py +0 -1
- transformers/models/phi3/modeling_phi3.py +2 -1
- transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +5 -5
- transformers/models/phi4_multimodal/modular_phi4_multimodal.py +4 -4
- transformers/models/phimoe/modeling_phimoe.py +4 -4
- transformers/models/phimoe/modular_phimoe.py +2 -2
- transformers/models/pix2struct/modeling_pix2struct.py +2 -0
- transformers/models/pixtral/modeling_pixtral.py +2 -1
- transformers/models/plbart/modeling_plbart.py +6 -0
- transformers/models/plbart/modular_plbart.py +2 -0
- transformers/models/plbart/tokenization_plbart.py +0 -2
- transformers/models/poolformer/modeling_poolformer.py +2 -0
- transformers/models/pop2piano/modeling_pop2piano.py +2 -0
- transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py +1 -0
- transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py +1 -0
- transformers/models/prophetnet/modeling_prophetnet.py +3 -0
- transformers/models/pvt/modeling_pvt.py +2 -0
- transformers/models/pvt_v2/modeling_pvt_v2.py +3 -0
- transformers/models/qwen2/modeling_qwen2.py +4 -4
- transformers/models/qwen2/tokenization_qwen2.py +14 -18
- transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py +4 -2
- transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +13 -16
- transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +14 -16
- transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +1 -1
- transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +5 -6
- transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py +3 -5
- transformers/models/qwen2_audio/modeling_qwen2_audio.py +2 -0
- transformers/models/qwen2_moe/modeling_qwen2_moe.py +4 -4
- transformers/models/qwen2_vl/configuration_qwen2_vl.py +1 -1
- transformers/models/qwen2_vl/modeling_qwen2_vl.py +6 -16
- transformers/models/qwen3/modeling_qwen3.py +4 -4
- transformers/models/qwen3_moe/modeling_qwen3_moe.py +4 -4
- transformers/models/qwen3_next/modeling_qwen3_next.py +4 -3
- transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +21 -23
- transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +14 -16
- transformers/models/qwen3_vl/modeling_qwen3_vl.py +39 -37
- transformers/models/qwen3_vl/modular_qwen3_vl.py +37 -35
- transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py +39 -37
- transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py +4 -1
- transformers/models/rag/modeling_rag.py +1 -0
- transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +15 -1
- transformers/models/reformer/modeling_reformer.py +4 -0
- transformers/models/reformer/tokenization_reformer.py +11 -28
- transformers/models/regnet/modeling_regnet.py +6 -1
- transformers/models/rembert/modeling_rembert.py +6 -0
- transformers/models/rembert/tokenization_rembert.py +3 -10
- transformers/models/resnet/modeling_resnet.py +11 -2
- transformers/models/roberta/tokenization_roberta.py +18 -27
- transformers/models/roformer/modeling_roformer.py +6 -0
- transformers/models/roformer/tokenization_roformer.py +77 -412
- transformers/models/rt_detr/modeling_rt_detr.py +2 -0
- transformers/models/rt_detr/modeling_rt_detr_resnet.py +5 -1
- transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +2 -0
- transformers/models/rwkv/modeling_rwkv.py +1 -0
- transformers/models/sam2/modeling_sam2.py +2 -2
- transformers/models/sam2/modular_sam2.py +2 -2
- transformers/models/sam2_video/modeling_sam2_video.py +1 -0
- transformers/models/sam2_video/modular_sam2_video.py +1 -0
- transformers/models/sam3/modeling_sam3.py +77 -80
- transformers/models/sam3_tracker/modeling_sam3_tracker.py +6 -1
- transformers/models/sam3_tracker/modular_sam3_tracker.py +6 -1
- transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +1 -0
- transformers/models/sam3_video/modeling_sam3_video.py +1 -0
- transformers/models/seamless_m4t/modeling_seamless_m4t.py +5 -1
- transformers/models/seamless_m4t/tokenization_seamless_m4t.py +27 -59
- transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +5 -1
- transformers/models/seed_oss/modeling_seed_oss.py +2 -2
- transformers/models/segformer/modeling_segformer.py +4 -1
- transformers/models/seggpt/modeling_seggpt.py +2 -0
- transformers/models/sew/modeling_sew.py +3 -0
- transformers/models/sew/modular_sew.py +1 -0
- transformers/models/sew_d/modeling_sew_d.py +3 -0
- transformers/models/siglip2/modeling_siglip2.py +4 -0
- transformers/models/siglip2/modular_siglip2.py +4 -0
- transformers/models/smollm3/modeling_smollm3.py +4 -4
- transformers/models/smolvlm/processing_smolvlm.py +0 -7
- transformers/models/speech_to_text/modeling_speech_to_text.py +4 -0
- transformers/models/speecht5/modeling_speecht5.py +13 -1
- transformers/models/splinter/modeling_splinter.py +3 -0
- transformers/models/splinter/tokenization_splinter.py +9 -28
- transformers/models/squeezebert/modeling_squeezebert.py +6 -0
- transformers/models/stablelm/modeling_stablelm.py +3 -1
- transformers/models/starcoder2/modeling_starcoder2.py +4 -3
- transformers/models/superglue/modeling_superglue.py +1 -0
- transformers/models/superpoint/modeling_superpoint.py +1 -0
- transformers/models/swiftformer/modeling_swiftformer.py +2 -0
- transformers/models/swin/modeling_swin.py +4 -0
- transformers/models/swin2sr/modeling_swin2sr.py +2 -0
- transformers/models/swinv2/modeling_swinv2.py +4 -0
- transformers/models/t5/modeling_t5.py +7 -0
- transformers/models/t5/tokenization_t5.py +4 -8
- transformers/models/t5gemma/modeling_t5gemma.py +5 -5
- transformers/models/t5gemma2/modeling_t5gemma2.py +6 -6
- transformers/models/table_transformer/modeling_table_transformer.py +4 -0
- transformers/models/tapas/modeling_tapas.py +3 -0
- transformers/models/textnet/modeling_textnet.py +11 -2
- transformers/models/time_series_transformer/modeling_time_series_transformer.py +4 -0
- transformers/models/timesfm/modeling_timesfm.py +2 -0
- transformers/models/timesfm/modular_timesfm.py +2 -0
- transformers/models/timesformer/modeling_timesformer.py +2 -0
- transformers/models/timm_wrapper/modeling_timm_wrapper.py +1 -1
- transformers/models/trocr/modeling_trocr.py +2 -0
- transformers/models/tvp/modeling_tvp.py +2 -0
- transformers/models/udop/modeling_udop.py +4 -0
- transformers/models/udop/tokenization_udop.py +5 -13
- transformers/models/umt5/modeling_umt5.py +7 -0
- transformers/models/unispeech/modeling_unispeech.py +4 -0
- transformers/models/unispeech/modular_unispeech.py +2 -0
- transformers/models/unispeech_sat/modeling_unispeech_sat.py +6 -0
- transformers/models/unispeech_sat/modular_unispeech_sat.py +2 -0
- transformers/models/univnet/modeling_univnet.py +1 -0
- transformers/models/upernet/modeling_upernet.py +1 -0
- transformers/models/vaultgemma/modeling_vaultgemma.py +4 -4
- transformers/models/vilt/modeling_vilt.py +6 -0
- transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +1 -0
- transformers/models/visual_bert/modeling_visual_bert.py +6 -0
- transformers/models/vitdet/modeling_vitdet.py +2 -0
- transformers/models/vitmatte/modeling_vitmatte.py +1 -0
- transformers/models/vits/modeling_vits.py +1 -0
- transformers/models/vjepa2/modeling_vjepa2.py +1 -0
- transformers/models/wav2vec2/modeling_wav2vec2.py +7 -0
- transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +5 -0
- transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py +5 -0
- transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +6 -0
- transformers/models/wavlm/modeling_wavlm.py +5 -0
- transformers/models/whisper/modeling_whisper.py +6 -0
- transformers/models/whisper/tokenization_whisper.py +4 -15
- transformers/models/x_clip/modeling_x_clip.py +3 -0
- transformers/models/xglm/modeling_xglm.py +1 -0
- transformers/models/xglm/tokenization_xglm.py +4 -9
- transformers/models/xlm/modeling_xlm.py +5 -0
- transformers/models/xlm_roberta/tokenization_xlm_roberta.py +9 -16
- transformers/models/xlnet/tokenization_xlnet.py +3 -7
- transformers/models/yoso/modeling_yoso.py +6 -0
- transformers/models/zamba/modeling_zamba.py +2 -0
- transformers/models/zamba2/modeling_zamba2.py +4 -2
- transformers/models/zamba2/modular_zamba2.py +1 -1
- transformers/models/zoedepth/modeling_zoedepth.py +1 -0
- transformers/pipelines/__init__.py +2 -3
- transformers/pipelines/base.py +1 -9
- transformers/pipelines/document_question_answering.py +3 -1
- transformers/pipelines/text_generation.py +1 -1
- transformers/processing_utils.py +23 -11
- transformers/quantizers/base.py +35 -110
- transformers/quantizers/quantizer_aqlm.py +1 -5
- transformers/quantizers/quantizer_auto_round.py +1 -2
- transformers/quantizers/quantizer_awq.py +17 -81
- transformers/quantizers/quantizer_bitnet.py +3 -8
- transformers/quantizers/quantizer_bnb_4bit.py +13 -110
- transformers/quantizers/quantizer_bnb_8bit.py +16 -92
- transformers/quantizers/quantizer_compressed_tensors.py +1 -5
- transformers/quantizers/quantizer_eetq.py +14 -62
- transformers/quantizers/quantizer_fbgemm_fp8.py +34 -125
- transformers/quantizers/quantizer_finegrained_fp8.py +13 -105
- transformers/quantizers/quantizer_fp_quant.py +48 -78
- transformers/quantizers/quantizer_gptq.py +7 -24
- transformers/quantizers/quantizer_higgs.py +40 -54
- transformers/quantizers/quantizer_hqq.py +144 -153
- transformers/quantizers/quantizer_mxfp4.py +13 -167
- transformers/quantizers/quantizer_quanto.py +20 -64
- transformers/quantizers/quantizer_quark.py +36 -17
- transformers/quantizers/quantizer_spqr.py +1 -4
- transformers/quantizers/quantizer_torchao.py +23 -202
- transformers/quantizers/quantizer_vptq.py +8 -22
- transformers/quantizers/quantizers_utils.py +20 -0
- transformers/testing_utils.py +297 -36
- transformers/tokenization_mistral_common.py +4 -0
- transformers/tokenization_utils_base.py +113 -222
- transformers/tokenization_utils_tokenizers.py +168 -107
- transformers/trainer.py +28 -31
- transformers/trainer_jit_checkpoint.py +126 -0
- transformers/trainer_utils.py +1 -1
- transformers/training_args.py +66 -28
- transformers/utils/__init__.py +3 -4
- transformers/utils/auto_docstring.py +1 -0
- transformers/utils/generic.py +27 -1
- transformers/utils/hub.py +5 -15
- transformers/utils/import_utils.py +61 -16
- transformers/utils/kernel_config.py +4 -2
- transformers/utils/loading_report.py +19 -10
- transformers/utils/quantization_config.py +75 -242
- transformers/video_processing_utils.py +1 -2
- {transformers-5.0.0rc0.dist-info → transformers-5.0.0rc1.dist-info}/METADATA +274 -227
- {transformers-5.0.0rc0.dist-info → transformers-5.0.0rc1.dist-info}/RECORD +536 -520
- {transformers-5.0.0rc0.dist-info → transformers-5.0.0rc1.dist-info}/WHEEL +1 -1
- transformers/kernels/__init__.py +0 -0
- transformers/kernels/falcon_mamba/selective_scan_with_ln_interface.py +0 -529
- transformers/models/roformer/tokenization_roformer_fast.py +0 -160
- {transformers-5.0.0rc0.dist-info → transformers-5.0.0rc1.dist-info}/entry_points.txt +0 -0
- {transformers-5.0.0rc0.dist-info → transformers-5.0.0rc1.dist-info/licenses}/LICENSE +0 -0
- {transformers-5.0.0rc0.dist-info → transformers-5.0.0rc1.dist-info}/top_level.txt +0 -0

transformers/models/owlv2/modeling_owlv2.py
@@ -793,6 +793,7 @@ class Owlv2TextModel(Owlv2PreTrainedModel):
         output_attentions: Optional[bool] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
+        **kwargs,
     ) -> Union[tuple, BaseModelOutputWithPooling]:
         r"""
         input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):

@@ -903,6 +904,7 @@ class Owlv2VisionModel(Owlv2PreTrainedModel):
         output_hidden_states: Optional[bool] = None,
         interpolate_pos_encoding: bool = False,
         return_dict: Optional[bool] = None,
+        **kwargs,
     ) -> Union[tuple, BaseModelOutputWithPooling]:
         r"""
         Examples:

@@ -1052,6 +1054,7 @@ class Owlv2Model(Owlv2PreTrainedModel):
         interpolate_pos_encoding: bool = False,
         return_base_image_embeds: Optional[bool] = None,
         return_dict: Optional[bool] = None,
+        **kwargs,
     ) -> Union[tuple, Owlv2Output]:
         r"""
         return_loss (`bool`, *optional*):

@@ -1602,6 +1605,7 @@ class Owlv2ForObjectDetection(Owlv2PreTrainedModel):
         output_hidden_states: Optional[bool] = None,
         interpolate_pos_encoding: bool = False,
         return_dict: Optional[bool] = None,
+        **kwargs,
     ) -> Owlv2ObjectDetectionOutput:
         r"""
         input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`, *optional*):

transformers/models/owlvit/modeling_owlvit.py
@@ -777,6 +777,7 @@ class OwlViTTextModel(OwlViTPreTrainedModel):
         output_attentions: Optional[bool] = None,
         output_hidden_states: Optional[bool] = None,
         return_dict: Optional[bool] = None,
+        **kwargs,
     ) -> Union[tuple, BaseModelOutputWithPooling]:
         r"""
         input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):

@@ -885,6 +886,7 @@ class OwlViTVisionModel(OwlViTPreTrainedModel):
         output_hidden_states: Optional[bool] = None,
         interpolate_pos_encoding: bool = False,
         return_dict: Optional[bool] = None,
+        **kwargs,
     ) -> Union[tuple, BaseModelOutputWithPooling]:
         r"""
         Examples:

@@ -1033,6 +1035,7 @@ class OwlViTModel(OwlViTPreTrainedModel):
         interpolate_pos_encoding: bool = False,
         return_base_image_embeds: Optional[bool] = None,
         return_dict: Optional[bool] = None,
+        **kwargs,
     ) -> Union[tuple, OwlViTOutput]:
         r"""
         return_loss (`bool`, *optional*):

@@ -1543,6 +1546,7 @@ class OwlViTForObjectDetection(OwlViTPreTrainedModel):
         output_hidden_states: Optional[bool] = None,
         interpolate_pos_encoding: bool = False,
         return_dict: Optional[bool] = None,
+        **kwargs,
     ) -> OwlViTObjectDetectionOutput:
         r"""
         input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`, *optional*):
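
All eight hunks above make the same change: the OWLv2 and OWL-ViT forward signatures gain a trailing `**kwargs`, so a call no longer raises `TypeError` when a caller forwards keyword arguments that a particular sub-model does not declare. A minimal standalone sketch of that Python pattern (illustrative function names, not transformers code):

```python
# Two "towers" that each declare only the keywords they care about, plus **kwargs
# as a sink for everything else. A composite caller can then forward one shared
# kwargs dict to both without filtering it per callee.

def text_forward(input_ids, output_attentions=None, **kwargs):
    # interpolate_pos_encoding (meant for the vision tower) lands in kwargs here
    # and is silently ignored instead of raising TypeError.
    return {"input_ids": input_ids, "output_attentions": output_attentions}

def vision_forward(pixel_values, interpolate_pos_encoding=False, **kwargs):
    # output_attentions (meant for the text tower) is ignored here the same way.
    return {"pixel_values": pixel_values, "interpolate": interpolate_pos_encoding}

shared = {"output_attentions": True, "interpolate_pos_encoding": True}
print(text_forward([1, 2, 3], **shared))
print(vision_forward([[0.5, 0.5]], **shared))
```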

transformers/models/paddleocr_vl/__init__.py
@@ -0,0 +1,32 @@
+# coding=utf-8
+# Copyright 2025 the HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+    from .configuration_paddleocr_vl import *
+    from .image_processing_paddleocr_vl import *
+    from .image_processing_paddleocr_vl_fast import *
+    from .modeling_paddleocr_vl import *
+    from .processing_paddleocr_vl import *
+else:
+    import sys
+
+    _file = globals()["__file__"]
+    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
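
The new package `__init__.py` follows the library's lazy-import convention: type checkers see the eager star imports under `TYPE_CHECKING`, while at runtime the module object in `sys.modules` is replaced by a `_LazyModule` proxy that defers each submodule import to first attribute access. A minimal sketch of the same deferred-import idea using a module-level `__getattr__` (PEP 562), not the actual `_LazyModule` implementation; the attribute mapping below is a hypothetical example:

```python
# Place in a package __init__.py. Submodules are imported only when one of their
# names is first looked up on the package, so importing the package itself stays cheap.
import importlib

_LAZY_ATTRS = {
    "PaddleOCRVLConfig": ".configuration_paddleocr_vl",  # attribute -> defining submodule
    "PaddleOCRVisionConfig": ".configuration_paddleocr_vl",
}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)  # the real import happens here, exactly once
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```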

transformers/models/paddleocr_vl/configuration_paddleocr_vl.py
@@ -0,0 +1,336 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/paddleocr_vl/modular_paddleocr_vl.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_paddleocr_vl.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# Copyright 2025 The PaddlePaddle Team and The HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+from typing import Optional
+
+from ...configuration_utils import PreTrainedConfig
+from ...modeling_rope_utils import RopeParameters
+
+
+class PaddleOCRVisionConfig(PreTrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`PaddleOCRVisionModel`]. It is used to instantiate a
+    PaddleOCRVL vision encoder according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the vision encoder of the PaddleOCRVL
+    [PaddlePaddle/PaddleOCRVL](https://huggingface.co/PaddlePaddle/PaddleOCR-VL) architecture.
+
+    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PreTrainedConfig`] for more information.
+
+    Args:
+        hidden_size (`int`, *optional*, defaults to 1152):
+            Dimensionality of the encoder layers and the pooler layer.
+        intermediate_size (`int`, *optional*, defaults to 4304):
+            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+        num_hidden_layers (`int`, *optional*, defaults to 27):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 16):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_channels (`int`, *optional*, defaults to 3):
+            Number of channels in the input images.
+        image_size (`int`, *optional*, defaults to 384):
+            The size (resolution) of each image.
+        patch_size (`int`, *optional*, defaults to 14):
+            The size (resolution) of each patch.
+        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the layer normalization layers.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+        spatial_merge_size (`int`, *optional*, defaults to 2):
+            The size used for merging spatial dimensions.
+
+    Example:
+
+    ```python
+    >>> from transformers import PaddleOCRVisionConfig, PaddleOCRVisionModel
+
+    >>> # Initializing a PaddleOCRVisionConfig with PaddlePaddle/PaddleOCR-VL style configuration
+    >>> configuration = PaddleOCRVisionConfig()
+
+    >>> # Initializing a PaddleOCRVisionModel (with random weights) from the PaddlePaddle/PaddleOCR-VL style configuration
+    >>> model = PaddleOCRVisionModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```
+    """
+
+    model_type = "paddleocr_vl_vision"
+    base_config_key = "vision_config"
+
+    def __init__(
+        self,
+        hidden_size=1152,
+        intermediate_size=4304,
+        num_hidden_layers=27,
+        num_attention_heads=16,
+        num_channels=3,
+        image_size=384,
+        patch_size=14,
+        hidden_act="gelu_pytorch_tanh",
+        layer_norm_eps=1e-6,
+        attention_dropout=0.0,
+        spatial_merge_size=2,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.num_channels = num_channels
+        self.patch_size = patch_size
+        self.image_size = image_size
+        self.attention_dropout = attention_dropout
+        self.layer_norm_eps = layer_norm_eps
+        self.hidden_act = hidden_act
+        self.spatial_merge_size = spatial_merge_size
+
+
+class PaddleOCRTextConfig(PreTrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`PaddleOCRTextModel`]. It is used to instantiate an Ernie 4.5
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the Ernie 4.5 0.3B.
+    e.g. [baidu/ERNIE-4.5-0.3B-PT](https://huggingface.co/baidu/ERNIE-4.5-0.3B-PT)
+
+    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PreTrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 103424):
+            Vocabulary size of the Ernie 4.5 model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`PaddleOCRTextModel`]
+        hidden_size (`int`, *optional*, defaults to 1024):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 18):
+            Number of hidden layers in the Transformer decoder.
+        num_attention_heads (`int`, *optional*, defaults to 16):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        num_key_value_heads (`int`, *optional*, defaults to 2):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details, check out [this
+            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 131072):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions.
+        pad_token_id (`int`, *optional*, defaults to 0):
+            Padding token id.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            End of stream token id.
+        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
+            Whether to tie weight embeddings
+        rope_parameters (`RopeParameters`, *optional*):
+            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
+            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
+            with longer `max_position_embeddings`.
+        use_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use a bias in any of the projections including mlp and attention for example.
+        head_dim (`int`, *optional*, defaults to 128):
+            The attention head dimension. If None, it will default to hidden_size // num_attention_heads
+
+    ```python
+    >>> from transformers import PaddleOCRTextModel, PaddleOCRTextConfig
+
+    >>> # Initializing a PaddleOCRText 0.3B style configuration
+    >>> configuration = PaddleOCRTextConfig()
+
+    >>> # Initializing a model from the 0.3B style configuration
+    >>> model = PaddleOCRTextModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "paddleocr_vl_text"
+    keys_to_ignore_at_inference = ["past_key_values"]
+    default_theta = 500000.0
+    # Default tensor parallel plan for base model `PaddleOCRTextModel`
+    base_model_tp_plan = {
+        "layers.*.self_attn.q_proj": "colwise",
+        "layers.*.self_attn.k_proj": "colwise",
+        "layers.*.self_attn.v_proj": "colwise",
+        "layers.*.self_attn.o_proj": "rowwise",
+        "layers.*.mlp.gate_proj": "colwise",
+        "layers.*.mlp.up_proj": "colwise",
+        "layers.*.mlp.down_proj": "rowwise",
+    }
+    base_model_pp_plan = {
+        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+        "norm": (["hidden_states"], ["hidden_states"]),
+    }
+
+    def __init__(
+        self,
+        vocab_size: Optional[int] = 103424,
+        hidden_size: Optional[int] = 1024,
+        intermediate_size: Optional[int] = 3072,
+        num_hidden_layers: Optional[int] = 18,
+        num_attention_heads: Optional[int] = 16,
+        num_key_value_heads: Optional[int] = 2,
+        hidden_act: Optional[str] = "silu",
+        max_position_embeddings: Optional[int] = 131072,
+        initializer_range: Optional[float] = 0.02,
+        rms_norm_eps: Optional[int] = 1e-05,
+        use_cache: Optional[int] = True,
+        pad_token_id: Optional[int] = 0,
+        bos_token_id: Optional[int] = 1,
+        eos_token_id: Optional[int] = 2,
+        tie_word_embeddings: Optional[bool] = True,
+        rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
+        use_bias: Optional[bool] = False,
+        head_dim: Optional[int] = 128,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.use_bias = use_bias
+        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
+        self.rope_parameters = rope_parameters
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+
+
+class PaddleOCRVLConfig(PreTrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`PaddleOCRVLForConditionalGeneration`]. It is used to instantiate a
+    PaddleOCRVL model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of
+    PaddleOCRVL [PaddlePaddle/PaddleOCR-VL](https://huggingface.co/PaddlePaddle/PaddleOCR-VL).
+
+    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PreTrainedConfig`] for more information.
+
+
+    Args:
+        text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `PaddleOCRTextConfig`):
+            The config object or dictionary of the text backbone.
+        vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `PaddleOCRVisionConfig`):
+            The config object or dictionary of the vision backbone.
+        image_token_id (`int`, *optional*, defaults to 100295):
+            The image token index to encode the image prompt.
+        video_token_id (`int`, *optional*, defaults to 100296):
+            The video token index to encode the image prompt.
+        vision_start_token_id (`int`, *optional*, defaults to 101305):
+            The token index to denote start of vision input.
+        vision_end_token_id (`int`, *optional*, defaults to 101306):
+            The token index to denote end of vision input.
+
+    ```python
+    >>> from transformers import PaddleOCRVLForConditionalGeneration, PaddleOCRVLConfig
+
+    >>> # Initializing a PaddleOCRVL style configuration
+    >>> configuration = PaddleOCRVLConfig()
+
+    >>> # Initializing a model from the PaddleOCRVL style configuration
+    >>> model = PaddleOCRVLForConditionalGeneration(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "paddleocr_vl"
+
+    sub_configs = {"vision_config": PaddleOCRVisionConfig, "text_config": PaddleOCRTextConfig}
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        text_config=None,
+        vision_config=None,
+        image_token_id=100295,
+        video_token_id=100296,
+        vision_start_token_id=101305,
+        vision_end_token_id=101306,
+        **kwargs,
+    ):
+        if isinstance(vision_config, dict):
+            self.vision_config = self.sub_configs["vision_config"](**vision_config)
+        elif vision_config is None:
+            self.vision_config = self.sub_configs["vision_config"]()
+
+        if isinstance(text_config, dict):
+            self.text_config = self.sub_configs["text_config"](**text_config)
+        elif text_config is None:
+            # Hub configs are saved as flat dicts so we pop some of kwargs to init `TextConfig`
+            text_params = inspect.signature(self.sub_configs["text_config"].__init__).parameters.keys()
+            text_params = list(text_params) + ["rope_scaling", "rope_theta"]
+            text_config = {key: kwargs.pop(key) for key in text_params if key in kwargs}
+            text_config["dtype"] = kwargs.get("torch_dtype", kwargs.get("dtype"))  # don't pop the dtype
+            self.text_config = self.sub_configs["text_config"](**text_config)
+
+        self.image_token_id = image_token_id
+        self.video_token_id = video_token_id
+        self.vision_start_token_id = vision_start_token_id
+        self.vision_end_token_id = vision_end_token_id
+
+        # FIXME: arthur/cyril - tying has to be used from the text config
+        kwargs["tie_word_embeddings"] = self.text_config.tie_word_embeddings
+        super().__init__(**kwargs)
+
+
+__all__ = ["PaddleOCRVLConfig", "PaddleOCRVisionConfig", "PaddleOCRTextConfig"]
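
One behavior worth calling out in `PaddleOCRVLConfig.__init__` above: when no nested `text_config` is passed, any top-level kwargs whose names match `PaddleOCRTextConfig.__init__` parameters (plus `rope_scaling` and `rope_theta`) are popped into the text sub-config, which keeps older flat Hub configs loadable. A sketch of how that plays out, assuming the rc1 wheel is installed and the class is importable from the top-level package:

```python
from transformers import PaddleOCRVLConfig

# Old-style flat config: text-model fields sit at the top level instead of under
# a "text_config" key. __init__ routes them into the PaddleOCRTextConfig.
config = PaddleOCRVLConfig(hidden_size=512, num_hidden_layers=4)

print(config.text_config.hidden_size)        # 512
print(config.text_config.num_hidden_layers)  # 4
print(config.image_token_id)                 # 100295 (default)
```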