transformers 5.0.0rc1__py3-none-any.whl → 5.0.0rc2__py3-none-any.whl
This diff shows the changes between two publicly available versions of the package as they appear in their public registry. It is provided for informational purposes only.
- transformers/__init__.py +20 -1
- transformers/activations.py +1 -1
- transformers/audio_utils.py +0 -1
- transformers/cache_utils.py +17 -15
- transformers/configuration_utils.py +114 -70
- transformers/conversion_mapping.py +68 -5
- transformers/core_model_loading.py +201 -35
- transformers/dependency_versions_table.py +1 -1
- transformers/feature_extraction_utils.py +54 -22
- transformers/generation/candidate_generator.py +79 -31
- transformers/generation/configuration_utils.py +162 -122
- transformers/generation/continuous_batching/cache.py +47 -18
- transformers/generation/continuous_batching/cache_manager.py +131 -34
- transformers/generation/continuous_batching/continuous_api.py +101 -64
- transformers/generation/continuous_batching/requests.py +28 -1
- transformers/generation/continuous_batching/scheduler.py +11 -4
- transformers/generation/stopping_criteria.py +1 -1
- transformers/generation/utils.py +108 -110
- transformers/generation/watermarking.py +8 -5
- transformers/image_processing_base.py +2 -12
- transformers/image_processing_utils_fast.py +15 -4
- transformers/initialization.py +37 -0
- transformers/integrations/__init__.py +12 -0
- transformers/integrations/accelerate.py +44 -111
- transformers/integrations/aqlm.py +3 -5
- transformers/integrations/awq.py +2 -5
- transformers/integrations/bitnet.py +5 -8
- transformers/integrations/bitsandbytes.py +16 -15
- transformers/integrations/deepspeed.py +18 -3
- transformers/integrations/eetq.py +3 -5
- transformers/integrations/fbgemm_fp8.py +1 -1
- transformers/integrations/finegrained_fp8.py +6 -16
- transformers/integrations/flash_attention.py +2 -2
- transformers/integrations/higgs.py +2 -5
- transformers/integrations/hub_kernels.py +23 -5
- transformers/integrations/integration_utils.py +35 -0
- transformers/integrations/mistral.py +12 -0
- transformers/integrations/moe.py +240 -0
- transformers/integrations/mxfp4.py +4 -10
- transformers/integrations/peft.py +5 -0
- transformers/integrations/quanto.py +5 -2
- transformers/integrations/spqr.py +3 -5
- transformers/integrations/tensor_parallel.py +167 -221
- transformers/integrations/vptq.py +3 -5
- transformers/modeling_gguf_pytorch_utils.py +66 -19
- transformers/modeling_rope_utils.py +78 -81
- transformers/modeling_utils.py +583 -503
- transformers/models/__init__.py +19 -0
- transformers/models/afmoe/modeling_afmoe.py +7 -16
- transformers/models/afmoe/modular_afmoe.py +5 -13
- transformers/models/aimv2/modeling_aimv2.py +4 -0
- transformers/models/aimv2/modular_aimv2.py +4 -0
- transformers/models/albert/modeling_albert.py +3 -0
- transformers/models/align/modeling_align.py +12 -6
- transformers/models/altclip/modeling_altclip.py +7 -3
- transformers/models/apertus/modeling_apertus.py +4 -2
- transformers/models/apertus/modular_apertus.py +4 -1
- transformers/models/arcee/modeling_arcee.py +1 -1
- transformers/models/aria/modeling_aria.py +8 -4
- transformers/models/aria/modular_aria.py +7 -3
- transformers/models/audioflamingo3/processing_audioflamingo3.py +27 -22
- transformers/models/auto/auto_factory.py +1 -1
- transformers/models/auto/configuration_auto.py +27 -0
- transformers/models/auto/feature_extraction_auto.py +7 -3
- transformers/models/auto/image_processing_auto.py +4 -2
- transformers/models/auto/modeling_auto.py +31 -0
- transformers/models/auto/processing_auto.py +4 -0
- transformers/models/auto/tokenization_auto.py +132 -153
- transformers/models/auto/video_processing_auto.py +5 -2
- transformers/models/aya_vision/modeling_aya_vision.py +7 -3
- transformers/models/bamba/modeling_bamba.py +18 -19
- transformers/models/bamba/modular_bamba.py +17 -16
- transformers/models/bark/modeling_bark.py +9 -0
- transformers/models/bart/configuration_bart.py +0 -1
- transformers/models/bart/modeling_bart.py +7 -0
- transformers/models/beit/image_processing_beit_fast.py +0 -1
- transformers/models/bert/modeling_bert.py +3 -0
- transformers/models/bert_generation/modeling_bert_generation.py +2 -0
- transformers/models/big_bird/modeling_big_bird.py +3 -0
- transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +7 -0
- transformers/models/bit/modeling_bit.py +5 -1
- transformers/models/bitnet/modeling_bitnet.py +1 -1
- transformers/models/blenderbot/modeling_blenderbot.py +7 -0
- transformers/models/blenderbot/tokenization_blenderbot.py +6 -7
- transformers/models/blenderbot_small/modeling_blenderbot_small.py +7 -0
- transformers/models/blip/modeling_blip.py +2 -0
- transformers/models/blip/modeling_blip_text.py +8 -0
- transformers/models/blip_2/modeling_blip_2.py +2 -0
- transformers/models/bloom/modeling_bloom.py +13 -44
- transformers/models/blt/modeling_blt.py +162 -2
- transformers/models/blt/modular_blt.py +168 -3
- transformers/models/bridgetower/image_processing_bridgetower_fast.py +0 -2
- transformers/models/bridgetower/modeling_bridgetower.py +6 -0
- transformers/models/bros/modeling_bros.py +8 -0
- transformers/models/camembert/modeling_camembert.py +109 -106
- transformers/models/canine/modeling_canine.py +6 -0
- transformers/models/canine/tokenization_canine.py +2 -0
- transformers/models/chameleon/modeling_chameleon.py +9 -4
- transformers/models/chinese_clip/modeling_chinese_clip.py +6 -3
- transformers/models/clap/feature_extraction_clap.py +2 -2
- transformers/models/clap/modeling_clap.py +25 -15
- transformers/models/clip/modeling_clip.py +2 -0
- transformers/models/clipseg/modeling_clipseg.py +4 -0
- transformers/models/clvp/modeling_clvp.py +14 -3
- transformers/models/code_llama/tokenization_code_llama.py +1 -1
- transformers/models/codegen/modeling_codegen.py +13 -4
- transformers/models/cohere/modeling_cohere.py +1 -1
- transformers/models/cohere2/modeling_cohere2.py +1 -1
- transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +0 -1
- transformers/models/cohere2_vision/modeling_cohere2_vision.py +7 -3
- transformers/models/conditional_detr/configuration_conditional_detr.py +1 -1
- transformers/models/conditional_detr/modeling_conditional_detr.py +4 -1
- transformers/models/convbert/modeling_convbert.py +3 -0
- transformers/models/convnext/image_processing_convnext.py +2 -2
- transformers/models/convnext/image_processing_convnext_fast.py +9 -13
- transformers/models/csm/generation_csm.py +19 -22
- transformers/models/csm/modeling_csm.py +3 -1
- transformers/models/csm/modular_csm.py +2 -0
- transformers/models/ctrl/modeling_ctrl.py +14 -2
- transformers/models/cvt/modeling_cvt.py +5 -1
- transformers/models/cwm/modeling_cwm.py +1 -1
- transformers/models/d_fine/configuration_d_fine.py +3 -4
- transformers/models/d_fine/modeling_d_fine.py +46 -39
- transformers/models/d_fine/modular_d_fine.py +15 -4
- transformers/models/dab_detr/configuration_dab_detr.py +2 -2
- transformers/models/dab_detr/modeling_dab_detr.py +1 -1
- transformers/models/dac/modeling_dac.py +4 -4
- transformers/models/data2vec/modeling_data2vec_text.py +7 -0
- transformers/models/data2vec/modular_data2vec_text.py +7 -0
- transformers/models/dbrx/configuration_dbrx.py +9 -1
- transformers/models/dbrx/modeling_dbrx.py +1 -1
- transformers/models/deberta/modeling_deberta.py +2 -0
- transformers/models/deberta_v2/modeling_deberta_v2.py +2 -0
- transformers/models/decision_transformer/modeling_decision_transformer.py +8 -5
- transformers/models/deepseek_v2/modeling_deepseek_v2.py +7 -4
- transformers/models/deepseek_v2/modular_deepseek_v2.py +4 -2
- transformers/models/deepseek_v3/modeling_deepseek_v3.py +9 -5
- transformers/models/deepseek_v3/modular_deepseek_v3.py +6 -2
- transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py +0 -1
- transformers/models/deepseek_vl/modeling_deepseek_vl.py +9 -5
- transformers/models/deepseek_vl/modular_deepseek_vl.py +3 -0
- transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py +0 -4
- transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py +9 -5
- transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py +9 -9
- transformers/models/deformable_detr/configuration_deformable_detr.py +2 -2
- transformers/models/deformable_detr/modeling_deformable_detr.py +1 -1
- transformers/models/depth_anything/configuration_depth_anything.py +2 -3
- transformers/models/depth_pro/image_processing_depth_pro_fast.py +0 -1
- transformers/models/detr/configuration_detr.py +1 -1
- transformers/models/detr/modeling_detr.py +8 -1
- transformers/models/dia/generation_dia.py +3 -10
- transformers/models/dia/modeling_dia.py +12 -1
- transformers/models/dia/modular_dia.py +11 -0
- transformers/models/dia/processing_dia.py +1 -1
- transformers/models/diffllama/modeling_diffllama.py +3 -3
- transformers/models/diffllama/modular_diffllama.py +2 -2
- transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py +0 -1
- transformers/models/dinov3_vit/modeling_dinov3_vit.py +3 -0
- transformers/models/dinov3_vit/modular_dinov3_vit.py +3 -0
- transformers/models/distilbert/modeling_distilbert.py +11 -9
- transformers/models/doge/modeling_doge.py +1 -1
- transformers/models/donut/image_processing_donut_fast.py +0 -1
- transformers/models/donut/modeling_donut_swin.py +16 -12
- transformers/models/dots1/modeling_dots1.py +14 -5
- transformers/models/dpt/configuration_dpt.py +1 -1
- transformers/models/dpt/image_processing_dpt_fast.py +1 -2
- transformers/models/dpt/modular_dpt.py +1 -2
- transformers/models/edgetam/configuration_edgetam.py +1 -1
- transformers/models/edgetam/modeling_edgetam.py +5 -2
- transformers/models/edgetam/modular_edgetam.py +15 -14
- transformers/models/edgetam_video/modeling_edgetam_video.py +55 -43
- transformers/models/edgetam_video/modular_edgetam_video.py +13 -19
- transformers/models/efficientloftr/image_processing_efficientloftr_fast.py +1 -2
- transformers/models/efficientloftr/modeling_efficientloftr.py +14 -1
- transformers/models/efficientnet/image_processing_efficientnet.py +5 -6
- transformers/models/efficientnet/image_processing_efficientnet_fast.py +1 -2
- transformers/models/efficientnet/modeling_efficientnet.py +5 -1
- transformers/models/electra/modeling_electra.py +7 -0
- transformers/models/emu3/modeling_emu3.py +8 -2
- transformers/models/emu3/modular_emu3.py +7 -1
- transformers/models/encodec/modeling_encodec.py +14 -0
- transformers/models/eomt/image_processing_eomt_fast.py +46 -14
- transformers/models/eomt/modeling_eomt.py +7 -0
- transformers/models/eomt/modular_eomt.py +7 -0
- transformers/models/ernie/modeling_ernie.py +6 -0
- transformers/models/ernie/modular_ernie.py +6 -0
- transformers/models/ernie4_5/modeling_ernie4_5.py +1 -1
- transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +16 -13
- transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py +9 -35
- transformers/models/ernie4_5_vl_moe/__init__.py +31 -0
- transformers/models/ernie4_5_vl_moe/configuration_ernie4_5_vl_moe.py +330 -0
- transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe.py +456 -0
- transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe_fast.py +232 -0
- transformers/models/ernie4_5_vl_moe/modeling_ernie4_5_vl_moe.py +1898 -0
- transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py +1904 -0
- transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py +251 -0
- transformers/models/ernie4_5_vl_moe/video_processing_ernie4_5_vl_moe.py +594 -0
- transformers/models/esm/modeling_esm.py +6 -0
- transformers/models/esm/modeling_esmfold.py +6 -1
- transformers/models/evolla/modeling_evolla.py +9 -1
- transformers/models/evolla/modular_evolla.py +8 -0
- transformers/models/exaone4/modeling_exaone4.py +1 -1
- transformers/models/falcon/modeling_falcon.py +3 -3
- transformers/models/falcon_h1/modeling_falcon_h1.py +28 -23
- transformers/models/falcon_h1/modular_falcon_h1.py +7 -2
- transformers/models/falcon_mamba/modeling_falcon_mamba.py +6 -2
- transformers/models/falcon_mamba/modular_falcon_mamba.py +7 -2
- transformers/models/fast_vlm/modeling_fast_vlm.py +7 -3
- transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +23 -10
- transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +1 -0
- transformers/models/flaubert/modeling_flaubert.py +14 -15
- transformers/models/flava/image_processing_flava_fast.py +0 -2
- transformers/models/flava/modeling_flava.py +4 -1
- transformers/models/flex_olmo/modeling_flex_olmo.py +7 -4
- transformers/models/florence2/modeling_florence2.py +20 -3
- transformers/models/florence2/modular_florence2.py +13 -0
- transformers/models/fnet/modeling_fnet.py +7 -0
- transformers/models/fuyu/image_processing_fuyu.py +1 -1
- transformers/models/fuyu/modeling_fuyu.py +3 -1
- transformers/models/fuyu/processing_fuyu.py +16 -0
- transformers/models/gemma/modeling_gemma.py +10 -12
- transformers/models/gemma/modular_gemma.py +9 -11
- transformers/models/gemma2/modeling_gemma2.py +1 -1
- transformers/models/gemma2/modular_gemma2.py +1 -1
- transformers/models/gemma3/image_processing_gemma3_fast.py +0 -1
- transformers/models/gemma3/modeling_gemma3.py +28 -7
- transformers/models/gemma3/modular_gemma3.py +26 -6
- transformers/models/gemma3n/configuration_gemma3n.py +3 -0
- transformers/models/gemma3n/modeling_gemma3n.py +47 -9
- transformers/models/gemma3n/modular_gemma3n.py +51 -9
- transformers/models/git/modeling_git.py +181 -126
- transformers/models/glm/modeling_glm.py +1 -1
- transformers/models/glm4/modeling_glm4.py +1 -1
- transformers/models/glm46v/image_processing_glm46v.py +0 -4
- transformers/models/glm46v/modeling_glm46v.py +3 -1
- transformers/models/glm46v/modular_glm46v.py +3 -0
- transformers/models/glm4_moe/modeling_glm4_moe.py +9 -5
- transformers/models/glm4_moe/modular_glm4_moe.py +1 -1
- transformers/models/glm4v/image_processing_glm4v.py +0 -4
- transformers/models/glm4v/modeling_glm4v.py +15 -5
- transformers/models/glm4v/modular_glm4v.py +11 -3
- transformers/models/glm4v_moe/modeling_glm4v_moe.py +39 -23
- transformers/models/glm4v_moe/modular_glm4v_moe.py +12 -0
- transformers/models/glmasr/__init__.py +30 -0
- transformers/models/glmasr/configuration_glmasr.py +197 -0
- transformers/models/glmasr/modeling_glmasr.py +512 -0
- transformers/models/glmasr/modular_glmasr.py +433 -0
- transformers/models/glmasr/processing_glmasr.py +332 -0
- transformers/models/glpn/image_processing_glpn_fast.py +0 -1
- transformers/models/got_ocr2/image_processing_got_ocr2_fast.py +0 -1
- transformers/models/got_ocr2/modeling_got_ocr2.py +8 -3
- transformers/models/gpt2/modeling_gpt2.py +8 -5
- transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +3 -8
- transformers/models/gpt_neo/modeling_gpt_neo.py +15 -3
- transformers/models/gpt_neox/modeling_gpt_neox.py +1 -1
- transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +1 -1
- transformers/models/gpt_oss/configuration_gpt_oss.py +17 -0
- transformers/models/gpt_oss/modeling_gpt_oss.py +6 -9
- transformers/models/gpt_oss/modular_gpt_oss.py +5 -7
- transformers/models/gptj/modeling_gptj.py +15 -6
- transformers/models/granite/modeling_granite.py +1 -1
- transformers/models/granite_speech/modeling_granite_speech.py +15 -1
- transformers/models/granitemoe/modeling_granitemoe.py +2 -3
- transformers/models/granitemoe/modular_granitemoe.py +1 -2
- transformers/models/granitemoehybrid/configuration_granitemoehybrid.py +4 -0
- transformers/models/granitemoehybrid/modeling_granitemoehybrid.py +33 -23
- transformers/models/granitemoehybrid/modular_granitemoehybrid.py +12 -2
- transformers/models/granitemoeshared/modeling_granitemoeshared.py +2 -3
- transformers/models/grounding_dino/configuration_grounding_dino.py +2 -3
- transformers/models/grounding_dino/modeling_grounding_dino.py +4 -4
- transformers/models/groupvit/modeling_groupvit.py +6 -1
- transformers/models/helium/modeling_helium.py +1 -1
- transformers/models/hgnet_v2/modeling_hgnet_v2.py +10 -0
- transformers/models/hgnet_v2/modular_hgnet_v2.py +10 -0
- transformers/models/hubert/modeling_hubert.py +4 -0
- transformers/models/hubert/modular_hubert.py +4 -0
- transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py +1 -1
- transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py +1 -1
- transformers/models/hunyuan_v1_moe/__init__.py +1 -1
- transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py +12 -4
- transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py +4 -2
- transformers/models/ibert/modeling_ibert.py +16 -0
- transformers/models/idefics/modeling_idefics.py +10 -0
- transformers/models/idefics2/modeling_idefics2.py +7 -1
- transformers/models/idefics3/modeling_idefics3.py +5 -1
- transformers/models/imagegpt/image_processing_imagegpt_fast.py +1 -5
- transformers/models/imagegpt/modeling_imagegpt.py +9 -2
- transformers/models/instructblip/modeling_instructblip.py +2 -0
- transformers/models/instructblipvideo/modeling_instructblipvideo.py +52 -50
- transformers/models/instructblipvideo/video_processing_instructblipvideo.py +0 -1
- transformers/models/internvl/modeling_internvl.py +11 -8
- transformers/models/internvl/modular_internvl.py +5 -9
- transformers/models/internvl/video_processing_internvl.py +0 -1
- transformers/models/jais2/__init__.py +27 -0
- transformers/models/jais2/configuration_jais2.py +152 -0
- transformers/models/jais2/modeling_jais2.py +486 -0
- transformers/models/jais2/modular_jais2.py +196 -0
- transformers/models/jamba/modeling_jamba.py +24 -19
- transformers/models/jamba/modular_jamba.py +17 -17
- transformers/models/janus/image_processing_janus_fast.py +0 -1
- transformers/models/janus/modeling_janus.py +15 -7
- transformers/models/janus/modular_janus.py +16 -7
- transformers/models/jetmoe/modeling_jetmoe.py +2 -2
- transformers/models/jetmoe/modular_jetmoe.py +1 -0
- transformers/models/kosmos2/modeling_kosmos2.py +14 -2
- transformers/models/kosmos2_5/image_processing_kosmos2_5_fast.py +2 -2
- transformers/models/kosmos2_5/modeling_kosmos2_5.py +10 -1
- transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +9 -3
- transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py +9 -1
- transformers/models/lasr/configuration_lasr.py +4 -0
- transformers/models/lasr/modeling_lasr.py +3 -2
- transformers/models/lasr/modular_lasr.py +8 -1
- transformers/models/lasr/processing_lasr.py +0 -2
- transformers/models/layoutlm/modeling_layoutlm.py +5 -3
- transformers/models/layoutlmv2/image_processing_layoutlmv2_fast.py +0 -1
- transformers/models/layoutlmv2/modeling_layoutlmv2.py +12 -0
- transformers/models/layoutlmv2/tokenization_layoutlmv2.py +1 -0
- transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py +0 -1
- transformers/models/layoutlmv3/modeling_layoutlmv3.py +29 -5
- transformers/models/led/modeling_led.py +6 -0
- transformers/models/levit/modeling_levit.py +18 -0
- transformers/models/lfm2/modeling_lfm2.py +1 -1
- transformers/models/lfm2_moe/modeling_lfm2_moe.py +14 -4
- transformers/models/lfm2_moe/modular_lfm2_moe.py +5 -28
- transformers/models/lfm2_vl/configuration_lfm2_vl.py +4 -0
- transformers/models/lfm2_vl/modeling_lfm2_vl.py +11 -5
- transformers/models/lfm2_vl/modular_lfm2_vl.py +4 -2
- transformers/models/lfm2_vl/processing_lfm2_vl.py +82 -42
- transformers/models/lightglue/image_processing_lightglue_fast.py +1 -2
- transformers/models/lilt/modeling_lilt.py +19 -15
- transformers/models/llama/modeling_llama.py +1 -1
- transformers/models/llama4/image_processing_llama4_fast.py +1 -2
- transformers/models/llama4/modeling_llama4.py +8 -4
- transformers/models/llava/image_processing_llava_fast.py +0 -1
- transformers/models/llava/modeling_llava.py +12 -7
- transformers/models/llava_next/image_processing_llava_next_fast.py +0 -1
- transformers/models/llava_next/modeling_llava_next.py +7 -3
- transformers/models/llava_next_video/modeling_llava_next_video.py +7 -3
- transformers/models/llava_next_video/modular_llava_next_video.py +7 -3
- transformers/models/llava_onevision/image_processing_llava_onevision_fast.py +0 -1
- transformers/models/llava_onevision/modeling_llava_onevision.py +7 -3
- transformers/models/llava_onevision/modular_llava_onevision.py +7 -4
- transformers/models/longcat_flash/modeling_longcat_flash.py +2 -1
- transformers/models/longcat_flash/modular_longcat_flash.py +1 -0
- transformers/models/longt5/modeling_longt5.py +0 -4
- transformers/models/m2m_100/modeling_m2m_100.py +10 -0
- transformers/models/mamba/modeling_mamba.py +2 -1
- transformers/models/mamba2/modeling_mamba2.py +24 -23
- transformers/models/marian/configuration_marian.py +1 -1
- transformers/models/marian/modeling_marian.py +3 -0
- transformers/models/markuplm/modeling_markuplm.py +5 -8
- transformers/models/mask2former/configuration_mask2former.py +3 -3
- transformers/models/mask2former/image_processing_mask2former_fast.py +1 -4
- transformers/models/mask2former/modeling_mask2former.py +9 -0
- transformers/models/maskformer/configuration_maskformer.py +3 -3
- transformers/models/maskformer/image_processing_maskformer_fast.py +1 -4
- transformers/models/maskformer/modeling_maskformer.py +9 -1
- transformers/models/maskformer/modeling_maskformer_swin.py +19 -15
- transformers/models/mbart/configuration_mbart.py +1 -0
- transformers/models/mbart/modeling_mbart.py +7 -0
- transformers/models/megatron_bert/modeling_megatron_bert.py +2 -0
- transformers/models/metaclip_2/modeling_metaclip_2.py +2 -0
- transformers/models/metaclip_2/modular_metaclip_2.py +2 -0
- transformers/models/mimi/modeling_mimi.py +25 -4
- transformers/models/minimax/modeling_minimax.py +16 -3
- transformers/models/minimax/modular_minimax.py +12 -1
- transformers/models/ministral/modeling_ministral.py +1 -1
- transformers/models/ministral3/modeling_ministral3.py +1 -1
- transformers/models/mistral/modeling_mistral.py +1 -1
- transformers/models/mistral3/modeling_mistral3.py +10 -4
- transformers/models/mistral3/modular_mistral3.py +3 -1
- transformers/models/mixtral/modeling_mixtral.py +12 -4
- transformers/models/mixtral/modular_mixtral.py +6 -2
- transformers/models/mlcd/modeling_mlcd.py +6 -0
- transformers/models/mlcd/modular_mlcd.py +4 -0
- transformers/models/mllama/modeling_mllama.py +13 -2
- transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py +1 -2
- transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +4 -4
- transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py +1 -2
- transformers/models/mobilebert/modeling_mobilebert.py +2 -0
- transformers/models/mobilenet_v2/image_processing_mobilenet_v2_fast.py +0 -1
- transformers/models/mobilevit/image_processing_mobilevit.py +5 -5
- transformers/models/mobilevit/image_processing_mobilevit_fast.py +1 -2
- transformers/models/mobilevit/modeling_mobilevit.py +4 -0
- transformers/models/mobilevitv2/modeling_mobilevitv2.py +4 -0
- transformers/models/modernbert/modeling_modernbert.py +12 -1
- transformers/models/modernbert/modular_modernbert.py +12 -1
- transformers/models/modernbert_decoder/modeling_modernbert_decoder.py +9 -1
- transformers/models/modernbert_decoder/modular_modernbert_decoder.py +9 -1
- transformers/models/moonshine/modeling_moonshine.py +1 -1
- transformers/models/moshi/modeling_moshi.py +21 -51
- transformers/models/mpnet/modeling_mpnet.py +2 -0
- transformers/models/mra/modeling_mra.py +4 -1
- transformers/models/mt5/configuration_mt5.py +2 -3
- transformers/models/mt5/modeling_mt5.py +0 -10
- transformers/models/musicgen/modeling_musicgen.py +5 -9
- transformers/models/musicgen_melody/modeling_musicgen_melody.py +4 -0
- transformers/models/mvp/modeling_mvp.py +7 -0
- transformers/models/nanochat/modeling_nanochat.py +1 -1
- transformers/models/nemotron/modeling_nemotron.py +3 -3
- transformers/models/nllb_moe/configuration_nllb_moe.py +1 -0
- transformers/models/nllb_moe/modeling_nllb_moe.py +10 -0
- transformers/models/nougat/image_processing_nougat_fast.py +0 -1
- transformers/models/nougat/tokenization_nougat.py +11 -16
- transformers/models/nystromformer/modeling_nystromformer.py +7 -0
- transformers/models/olmo/modeling_olmo.py +1 -1
- transformers/models/olmo2/modeling_olmo2.py +1 -1
- transformers/models/olmo3/modeling_olmo3.py +1 -1
- transformers/models/olmoe/modeling_olmoe.py +12 -4
- transformers/models/olmoe/modular_olmoe.py +4 -2
- transformers/models/omdet_turbo/configuration_omdet_turbo.py +2 -2
- transformers/models/omdet_turbo/modeling_omdet_turbo.py +4 -0
- transformers/models/oneformer/configuration_oneformer.py +3 -3
- transformers/models/oneformer/modeling_oneformer.py +7 -38
- transformers/models/openai/modeling_openai.py +12 -0
- transformers/models/ovis2/image_processing_ovis2_fast.py +0 -1
- transformers/models/ovis2/modeling_ovis2.py +15 -3
- transformers/models/ovis2/modular_ovis2.py +8 -0
- transformers/models/owlv2/image_processing_owlv2_fast.py +0 -2
- transformers/models/owlv2/modeling_owlv2.py +7 -3
- transformers/models/owlv2/modular_owlv2.py +0 -2
- transformers/models/owlvit/modeling_owlvit.py +7 -3
- transformers/models/paddleocr_vl/image_processing_paddleocr_vl.py +3 -2
- transformers/models/paddleocr_vl/modeling_paddleocr_vl.py +28 -14
- transformers/models/paddleocr_vl/modular_paddleocr_vl.py +22 -12
- transformers/models/paligemma/modeling_paligemma.py +25 -17
- transformers/models/parakeet/modeling_parakeet.py +5 -0
- transformers/models/parakeet/modular_parakeet.py +5 -0
- transformers/models/parakeet/{tokenization_parakeet_fast.py → tokenization_parakeet.py} +3 -3
- transformers/models/patchtsmixer/modeling_patchtsmixer.py +4 -0
- transformers/models/patchtst/modeling_patchtst.py +5 -4
- transformers/models/pe_audio/__init__.py +30 -0
- transformers/models/pe_audio/configuration_pe_audio.py +206 -0
- transformers/models/pe_audio/feature_extraction_pe_audio.py +162 -0
- transformers/models/pe_audio/modeling_pe_audio.py +820 -0
- transformers/models/pe_audio/modular_pe_audio.py +299 -0
- transformers/models/pe_audio/processing_pe_audio.py +24 -0
- transformers/models/pe_audio_video/__init__.py +29 -0
- transformers/models/pe_audio_video/configuration_pe_audio_video.py +225 -0
- transformers/models/pe_audio_video/modeling_pe_audio_video.py +972 -0
- transformers/models/pe_audio_video/modular_pe_audio_video.py +764 -0
- transformers/models/pe_audio_video/processing_pe_audio_video.py +25 -0
- transformers/models/pe_video/__init__.py +30 -0
- transformers/models/pe_video/configuration_pe_video.py +211 -0
- transformers/models/pe_video/modeling_pe_video.py +636 -0
- transformers/models/pe_video/modular_pe_video.py +219 -0
- transformers/models/pe_video/processing_pe_video.py +10 -0
- transformers/models/pe_video/video_processing_pe_video.py +66 -0
- transformers/models/pegasus/configuration_pegasus.py +1 -0
- transformers/models/pegasus/modeling_pegasus.py +3 -0
- transformers/models/pegasus_x/modeling_pegasus_x.py +1 -0
- transformers/models/perceiver/image_processing_perceiver_fast.py +0 -1
- transformers/models/perceiver/modeling_perceiver.py +5 -1
- transformers/models/perception_lm/image_processing_perception_lm_fast.py +0 -1
- transformers/models/perception_lm/modeling_perception_lm.py +7 -3
- transformers/models/perception_lm/modular_perception_lm.py +7 -3
- transformers/models/persimmon/modeling_persimmon.py +1 -1
- transformers/models/phi/modeling_phi.py +1 -1
- transformers/models/phi3/modeling_phi3.py +1 -1
- transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +4 -1
- transformers/models/phi4_multimodal/modular_phi4_multimodal.py +3 -0
- transformers/models/phi4_multimodal/processing_phi4_multimodal.py +0 -2
- transformers/models/phimoe/modeling_phimoe.py +12 -4
- transformers/models/phimoe/modular_phimoe.py +1 -1
- transformers/models/pix2struct/processing_pix2struct.py +0 -4
- transformers/models/pixio/__init__.py +30 -0
- transformers/models/pixio/configuration_pixio.py +151 -0
- transformers/models/pixio/modeling_pixio.py +507 -0
- transformers/models/pixio/modular_pixio.py +404 -0
- transformers/models/pixtral/modeling_pixtral.py +1 -1
- transformers/models/pixtral/processing_pixtral.py +3 -1
- transformers/models/plbart/configuration_plbart.py +1 -0
- transformers/models/plbart/modeling_plbart.py +7 -0
- transformers/models/plbart/modular_plbart.py +6 -0
- transformers/models/poolformer/image_processing_poolformer_fast.py +0 -1
- transformers/models/poolformer/modeling_poolformer.py +11 -1
- transformers/models/pop2piano/configuration_pop2piano.py +0 -1
- transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py +2 -3
- transformers/models/prophetnet/modeling_prophetnet.py +2 -1
- transformers/models/qwen2/modeling_qwen2.py +1 -1
- transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +104 -64
- transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +58 -18
- transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +18 -5
- transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py +26 -22
- transformers/models/qwen2_audio/modeling_qwen2_audio.py +2 -2
- transformers/models/qwen2_moe/modeling_qwen2_moe.py +12 -4
- transformers/models/qwen2_vl/image_processing_qwen2_vl.py +3 -2
- transformers/models/qwen2_vl/modeling_qwen2_vl.py +17 -4
- transformers/models/qwen3/modeling_qwen3.py +1 -1
- transformers/models/qwen3_moe/modeling_qwen3_moe.py +12 -4
- transformers/models/qwen3_next/modeling_qwen3_next.py +4 -6
- transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py +4 -0
- transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +92 -46
- transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +48 -4
- transformers/models/qwen3_vl/configuration_qwen3_vl.py +5 -5
- transformers/models/qwen3_vl/modeling_qwen3_vl.py +17 -4
- transformers/models/qwen3_vl/modular_qwen3_vl.py +21 -10
- transformers/models/qwen3_vl/processing_qwen3_vl.py +3 -3
- transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py +94 -112
- transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py +32 -81
- transformers/models/rag/configuration_rag.py +0 -8
- transformers/models/rag/modeling_rag.py +7 -9
- transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +3 -2
- transformers/models/reformer/modeling_reformer.py +9 -1
- transformers/models/regnet/modeling_regnet.py +4 -0
- transformers/models/rembert/modeling_rembert.py +7 -1
- transformers/models/resnet/modeling_resnet.py +8 -3
- transformers/models/roberta/modeling_roberta.py +3 -0
- transformers/models/roberta/modular_roberta.py +3 -0
- transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +3 -0
- transformers/models/roc_bert/modeling_roc_bert.py +3 -0
- transformers/models/rt_detr/configuration_rt_detr.py +1 -1
- transformers/models/rt_detr/modeling_rt_detr.py +4 -0
- transformers/models/rt_detr/modeling_rt_detr_resnet.py +8 -3
- transformers/models/rt_detr_v2/configuration_rt_detr_v2.py +2 -3
- transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +7 -0
- transformers/models/rt_detr_v2/modular_rt_detr_v2.py +8 -3
- transformers/models/rwkv/modeling_rwkv.py +1 -1
- transformers/models/sam/configuration_sam.py +1 -0
- transformers/models/sam/image_processing_sam_fast.py +0 -1
- transformers/models/sam/modeling_sam.py +4 -1
- transformers/models/sam2/configuration_sam2.py +1 -1
- transformers/models/sam2/modeling_sam2.py +5 -1
- transformers/models/sam2/modular_sam2.py +5 -1
- transformers/models/sam2_video/modeling_sam2_video.py +51 -43
- transformers/models/sam2_video/modular_sam2_video.py +31 -18
- transformers/models/sam3/configuration_sam3.py +21 -1
- transformers/models/sam3/modeling_sam3.py +23 -0
- transformers/models/sam3_tracker/modeling_sam3_tracker.py +2 -0
- transformers/models/sam3_tracker/modular_sam3_tracker.py +2 -0
- transformers/models/sam3_tracker_video/configuration_sam3_tracker_video.py +25 -0
- transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +26 -15
- transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py +25 -2
- transformers/models/sam3_video/configuration_sam3_video.py +14 -0
- transformers/models/sam3_video/modeling_sam3_video.py +3 -3
- transformers/models/sam3_video/processing_sam3_video.py +1 -1
- transformers/models/sam_hq/configuration_sam_hq.py +1 -0
- transformers/models/sam_hq/modeling_sam_hq.py +26 -23
- transformers/models/seamless_m4t/modeling_seamless_m4t.py +27 -11
- transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +6 -0
- transformers/models/seed_oss/modeling_seed_oss.py +1 -1
- transformers/models/segformer/image_processing_segformer_fast.py +0 -1
- transformers/models/segformer/modeling_segformer.py +2 -2
- transformers/models/segformer/modular_segformer.py +0 -1
- transformers/models/shieldgemma2/modeling_shieldgemma2.py +1 -0
- transformers/models/siglip/modeling_siglip.py +24 -2
- transformers/models/siglip2/modeling_siglip2.py +63 -41
- transformers/models/smollm3/modeling_smollm3.py +1 -1
- transformers/models/smolvlm/modeling_smolvlm.py +5 -1
- transformers/models/smolvlm/video_processing_smolvlm.py +0 -1
- transformers/models/speech_to_text/modeling_speech_to_text.py +10 -0
- transformers/models/speecht5/modeling_speecht5.py +28 -0
- transformers/models/splinter/modeling_splinter.py +9 -3
- transformers/models/squeezebert/modeling_squeezebert.py +2 -0
- transformers/models/stablelm/modeling_stablelm.py +1 -1
- transformers/models/starcoder2/modeling_starcoder2.py +1 -1
- transformers/models/superglue/image_processing_superglue_fast.py +1 -2
- transformers/models/superpoint/image_processing_superpoint_fast.py +1 -2
- transformers/models/swiftformer/modeling_swiftformer.py +4 -0
- transformers/models/swin/modeling_swin.py +16 -12
- transformers/models/swin2sr/image_processing_swin2sr_fast.py +0 -1
- transformers/models/swin2sr/modeling_swin2sr.py +49 -33
- transformers/models/swinv2/modeling_swinv2.py +41 -33
- transformers/models/switch_transformers/modeling_switch_transformers.py +2 -8
- transformers/models/switch_transformers/modular_switch_transformers.py +2 -8
- transformers/models/t5/configuration_t5.py +7 -1
- transformers/models/t5/modeling_t5.py +1 -7
- transformers/models/t5gemma/modeling_t5gemma.py +1 -1
- transformers/models/t5gemma2/configuration_t5gemma2.py +6 -42
- transformers/models/t5gemma2/modeling_t5gemma2.py +13 -4
- transformers/models/t5gemma2/modular_t5gemma2.py +289 -4
- transformers/models/table_transformer/configuration_table_transformer.py +1 -1
- transformers/models/table_transformer/modeling_table_transformer.py +1 -1
- transformers/models/textnet/image_processing_textnet_fast.py +0 -1
- transformers/models/timesfm/modeling_timesfm.py +12 -0
- transformers/models/timesfm/modular_timesfm.py +12 -0
- transformers/models/timm_backbone/modeling_timm_backbone.py +13 -9
- transformers/models/timm_wrapper/configuration_timm_wrapper.py +3 -0
- transformers/models/timm_wrapper/modeling_timm_wrapper.py +19 -13
- transformers/models/trocr/modeling_trocr.py +1 -2
- transformers/models/tvp/configuration_tvp.py +5 -1
- transformers/models/tvp/modeling_tvp.py +4 -4
- transformers/models/udop/configuration_udop.py +1 -0
- transformers/models/udop/modeling_udop.py +3 -7
- transformers/models/umt5/configuration_umt5.py +2 -2
- transformers/models/umt5/modeling_umt5.py +0 -6
- transformers/models/vaultgemma/modeling_vaultgemma.py +1 -1
- transformers/models/video_llama_3/image_processing_video_llama_3.py +3 -2
- transformers/models/video_llama_3/modeling_video_llama_3.py +12 -1
- transformers/models/video_llama_3/modular_video_llama_3.py +10 -1
- transformers/models/video_llava/modeling_video_llava.py +7 -3
- transformers/models/vilt/configuration_vilt.py +2 -2
- transformers/models/vilt/modeling_vilt.py +7 -0
- transformers/models/vipllava/modeling_vipllava.py +7 -3
- transformers/models/visual_bert/modeling_visual_bert.py +2 -0
- transformers/models/vitmatte/configuration_vitmatte.py +1 -1
- transformers/models/vitmatte/image_processing_vitmatte_fast.py +0 -1
- transformers/models/vitmatte/modeling_vitmatte.py +4 -0
- transformers/models/vitpose/configuration_vitpose.py +1 -1
- transformers/models/vitpose/image_processing_vitpose_fast.py +0 -1
- transformers/models/voxtral/modeling_voxtral.py +2 -2
- transformers/models/voxtral/modular_voxtral.py +2 -2
- transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +16 -10
- transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py +7 -0
- transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +21 -11
- transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py +21 -11
- transformers/models/whisper/generation_whisper.py +1 -0
- transformers/models/whisper/modeling_whisper.py +5 -3
- transformers/models/x_clip/modeling_x_clip.py +2 -0
- transformers/models/xcodec/modeling_xcodec.py +5 -0
- transformers/models/xglm/modeling_xglm.py +10 -0
- transformers/models/xlm/modeling_xlm.py +13 -14
- transformers/models/xlm_roberta/modeling_xlm_roberta.py +109 -106
- transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +3 -0
- transformers/models/xlnet/modeling_xlnet.py +3 -1
- transformers/models/xmod/modeling_xmod.py +3 -0
- transformers/models/yoso/modeling_yoso.py +4 -1
- transformers/models/zamba/modeling_zamba.py +2 -1
- transformers/models/zamba2/modeling_zamba2.py +3 -2
- transformers/models/zoedepth/configuration_zoedepth.py +1 -1
- transformers/models/zoedepth/image_processing_zoedepth_fast.py +1 -3
- transformers/models/zoedepth/modeling_zoedepth.py +7 -0
- transformers/pipelines/__init__.py +9 -6
- transformers/pipelines/automatic_speech_recognition.py +20 -12
- transformers/pipelines/base.py +1 -1
- transformers/pipelines/document_question_answering.py +1 -1
- transformers/pipelines/question_answering.py +1 -1
- transformers/pipelines/text_to_audio.py +2 -2
- transformers/processing_utils.py +127 -56
- transformers/quantizers/auto.py +2 -4
- transformers/quantizers/base.py +9 -64
- transformers/quantizers/quantizer_aqlm.py +1 -18
- transformers/quantizers/quantizer_auto_round.py +1 -10
- transformers/quantizers/quantizer_awq.py +3 -8
- transformers/quantizers/quantizer_bitnet.py +1 -6
- transformers/quantizers/quantizer_bnb_4bit.py +9 -49
- transformers/quantizers/quantizer_bnb_8bit.py +9 -19
- transformers/quantizers/quantizer_compressed_tensors.py +1 -4
- transformers/quantizers/quantizer_eetq.py +2 -12
- transformers/quantizers/quantizer_fbgemm_fp8.py +5 -14
- transformers/quantizers/quantizer_finegrained_fp8.py +15 -10
- transformers/quantizers/quantizer_fp_quant.py +4 -4
- transformers/quantizers/quantizer_gptq.py +1 -4
- transformers/quantizers/quantizer_higgs.py +2 -6
- transformers/quantizers/quantizer_mxfp4.py +2 -28
- transformers/quantizers/quantizer_quanto.py +14 -14
- transformers/quantizers/quantizer_spqr.py +3 -8
- transformers/quantizers/quantizer_torchao.py +28 -124
- transformers/quantizers/quantizer_vptq.py +1 -10
- transformers/testing_utils.py +28 -12
- transformers/tokenization_mistral_common.py +3 -2
- transformers/tokenization_utils_base.py +3 -2
- transformers/tokenization_utils_tokenizers.py +25 -2
- transformers/trainer.py +24 -2
- transformers/trainer_callback.py +8 -0
- transformers/trainer_seq2seq.py +4 -0
- transformers/training_args.py +8 -10
- transformers/utils/__init__.py +4 -0
- transformers/utils/attention_visualizer.py +4 -4
- transformers/utils/auto_docstring.py +34 -25
- transformers/utils/generic.py +20 -0
- transformers/utils/import_utils.py +51 -9
- transformers/utils/kernel_config.py +71 -18
- transformers/utils/quantization_config.py +8 -8
- transformers/video_processing_utils.py +16 -12
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/METADATA +5 -6
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/RECORD +671 -632
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/WHEEL +0 -0
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/entry_points.txt +0 -0
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/licenses/LICENSE +0 -0
- {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc2.dist-info}/top_level.txt +0 -0

transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe.py

@@ -0,0 +1,456 @@
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_ernie4_5_vl_moe.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    infer_channel_dimension_format,
    is_scaled_image,
    make_flat_list_of_images,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...processing_utils import ImagesKwargs
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Ernie4_5_VL_MoeImageProcessorKwargs(ImagesKwargs, total=False):
    r"""
    patch_size (`int`, *optional*, defaults to 14):
        The spatial patch size of the vision encoder.
    temporal_patch_size (`int`, *optional*):
        The temporal patch size of the vision encoder. Unused in the image processor, only used for videos.
    merge_size (`int`, *optional*, defaults to 2):
        The merge size of the vision encoder to llm encoder.
    """

    patch_size: int
    temporal_patch_size: int
    merge_size: int


def smart_resize(
    height: int, width: int, factor: int = 28, min_pixels: int = 56 * 56, max_pixels: int = 14 * 14 * 4 * 1280
):
    """Rescales the image so that the following conditions are met:

    1. Both dimensions (height and width) are divisible by 'factor'.

    2. The total number of pixels is within the range ['min_pixels', 'max_pixels'].

    3. The aspect ratio of the image is maintained as closely as possible.

    """
    if max(height, width) / min(height, width) > 200:
        raise ValueError(
            f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
        )
    h_bar = round(height / factor) * factor
    w_bar = round(width / factor) * factor
    if h_bar * w_bar > max_pixels:
        beta = math.sqrt((height * width) / max_pixels)
        h_bar = max(factor, math.floor(height / beta / factor) * factor)
        w_bar = max(factor, math.floor(width / beta / factor) * factor)
    elif h_bar * w_bar < min_pixels:
        beta = math.sqrt(min_pixels / (height * width))
        h_bar = math.ceil(height * beta / factor) * factor
        w_bar = math.ceil(width * beta / factor) * factor
    return h_bar, w_bar
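
# Illustrative worked example (editorial note, not part of the generated file): with the
# defaults factor=28, min_pixels=56 * 56 (= 3136) and max_pixels=14 * 14 * 4 * 1280 (= 1003520),
# a 1000 x 700 input is first rounded to multiples of 28, giving (1008, 700);
# 1008 * 700 = 705600 already lies inside [3136, 1003520], so no further scaling is applied
# and smart_resize(1000, 700) would return (1008, 700).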


class Ernie4_5_VL_MoeImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Ernie 4.5 VL image processor that dynamically resizes images based on the original images.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions.
        size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 6177}`):
            Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use when resizing the image.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image.
        image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
            Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
        image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
            Standard deviation to use if normalizing the image. This is a float or list of floats for each channel
            in the image.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
        patch_size (`int`, *optional*, defaults to 14):
            The spatial patch size of the vision encoder.
        temporal_patch_size (`int`, *optional*):
            The temporal patch size of the vision encoder. Unused in the image processor, only used for videos.
        merge_size (`int`, *optional*, defaults to 2):
            The merge size of the vision encoder to llm encoder.
    """

    model_input_names = ["pixel_values", "image_grid_thw"]
    valid_kwargs = Ernie4_5_VL_MoeImageProcessorKwargs

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        do_convert_rgb: bool = True,
        patch_size: int = 14,
        temporal_patch_size: Optional[int] = None,
        merge_size: int = 2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if size is not None:
            if "shortest_edge" not in size or "longest_edge" not in size:
                raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
            size = {"shortest_edge": size["shortest_edge"], "longest_edge": size["longest_edge"]}
        else:
            size = {"shortest_edge": 56 * 56, "longest_edge": 6177 * 28 * 28}
        self.size = size

        self.do_resize = do_resize
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD

        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.merge_size = merge_size
        self.do_convert_rgb = do_convert_rgb

    def _preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        patch_size: Optional[int] = None,
        temporal_patch_size: Optional[int] = None,
        merge_size: Optional[int] = None,
        do_convert_rgb: Optional[bool] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """
        Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.

        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
            vision_info (`list[Dict]`, *optional*):
                Optional list of dictionaries containing additional information about vision inputs.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Scale factor to use if rescaling the image.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
            patch_size (`int`, *optional*, defaults to `self.patch_size`):
                The spatial patch size of the vision encoder.
            temporal_patch_size (`int`, *optional*):
                The temporal patch size of the vision encoder. Unused in the image processor, only used for videos.
            merge_size (`int`, *optional*, defaults to `self.merge_size`):
                The merge size of the vision encoder to llm encoder.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        images = make_list_of_images(images)

        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale and is_scaled_image(images[0]):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        height, width = get_image_size(images[0], channel_dim=input_data_format)
        resized_height, resized_width = height, width
        processed_images = []
        for image in images:
            if do_resize:
                resized_height, resized_width = smart_resize(
                    height,
                    width,
                    factor=patch_size * merge_size,
                    min_pixels=size["shortest_edge"],
                    max_pixels=size["longest_edge"],
                )
                image = resize(
                    image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
                )

            if do_rescale:
                image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)

            if do_normalize:
                image = self.normalize(
                    image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
                )

            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
            processed_images.append(image)

        patches = np.array(processed_images)
        if data_format == ChannelDimension.LAST:
            patches = patches.transpose([0, 3, 1, 2])

        # Main difference to Qwen2 VL - no temporal patches
        channel = patches.shape[1]
        grid_t = patches.shape[0]
        grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
        patches = patches.reshape(
            [
                grid_t,
                channel,
                grid_h // merge_size,
                merge_size,
                patch_size,
                grid_w // merge_size,
                merge_size,
                patch_size,
            ]
        )
        # [grid_t, grid_h/merge, grid_w/merge, merge, merge, channel, patch, patch]
        patches = patches.transpose([0, 2, 5, 3, 6, 1, 4, 7])
        flatten_patches = patches.reshape(grid_t * grid_h * grid_w, channel * patch_size * patch_size)

        return flatten_patches, (grid_t, grid_h, grid_w)

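    # Illustrative worked example (editorial note, not part of the generated file): for a
    # single RGB image resized by smart_resize to 1008 x 700 with patch_size=14 and
    # merge_size=2, _preprocess returns grid_t=1, grid_h=72, grid_w=50, so flatten_patches
    # has shape (1 * 72 * 50, 3 * 14 * 14) = (3600, 588) and the grid tuple is (1, 72, 50).
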
298
|
+
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        patch_size: Optional[int] = None,
        temporal_patch_size: Optional[int] = None,
        merge_size: Optional[int] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """
        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. `size["shortest_edge"]` and `size["longest_edge"]` are passed to
                `smart_resize` as the minimum and maximum pixel budget; the aspect ratio of the input is preserved.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            patch_size (`int`, *optional*, defaults to `self.patch_size`):
                The spatial patch size of the vision encoder.
            temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
                The temporal patch size of the vision encoder.
            merge_size (`int`, *optional*, defaults to `self.merge_size`):
                The merge size of the vision encoder to LLM encoder.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        size = size if size is not None else self.size
        if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
            raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")

        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        patch_size = patch_size if patch_size is not None else self.patch_size
        temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
        merge_size = merge_size if merge_size is not None else self.merge_size
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        if images is not None:
            images = self.fetch_images(images)
            images = make_flat_list_of_images(images)

        if images is not None and not valid_images(images):
            raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")

        validate_preprocess_arguments(
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        data = {}
        if images is not None:
            pixel_values, vision_grid_thws = [], []
            for image in images:
                patches, image_grid_thw = self._preprocess(
                    image,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    patch_size=patch_size,
                    temporal_patch_size=temporal_patch_size,
                    merge_size=merge_size,
                    data_format=data_format,
                    do_convert_rgb=do_convert_rgb,
                    input_data_format=input_data_format,
                )
                pixel_values.extend(patches)
                vision_grid_thws.append(image_grid_thw)
            pixel_values = np.array(pixel_values)
            vision_grid_thws = np.array(vision_grid_thws)
            data.update({"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws})

        return BatchFeature(data=data, tensor_type=return_tensors)

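For orientation, a hedged usage sketch of the method above: `preprocess` fills every unset argument from the instance defaults, flattens the input into a list of images, runs `_preprocess` on each one, and returns a `BatchFeature` with the stacked patch rows under `pixel_values` and one `(grid_t, grid_h, grid_w)` triple per image under `image_grid_thw`. The checkpoint id below is a placeholder, not a repo shipped with this release:

from PIL import Image
import numpy as np
from transformers import AutoImageProcessor

# Placeholder repo id; substitute a real ERNIE-4.5-VL checkpoint.
processor = AutoImageProcessor.from_pretrained("org/ernie-4_5-vl-moe")
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))

batch = processor.preprocess(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # (num_patches, channel * patch_size * patch_size)
print(batch["image_grid_thw"])      # tensor([[grid_t, grid_h, grid_w]]) for the single image
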
    def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
        """
        A utility that returns the number of image patches for a given image size.

        Args:
            height (`int`):
                Height of the input image.
            width (`int`):
                Width of the input image.
            images_kwargs (`dict`, *optional*):
                Any kwargs to override defaults of the image processor.
        Returns:
            `int`: Number of image patches per image.
        """
        # Default to an empty dict so the lookups below also work when no kwargs are given.
        images_kwargs = images_kwargs if images_kwargs is not None else {}
        min_pixels = self.size["shortest_edge"]
        max_pixels = self.size["longest_edge"]
        patch_size = images_kwargs.get("patch_size", self.patch_size)
        merge_size = images_kwargs.get("merge_size", self.merge_size)

        factor = patch_size * merge_size
        resized_height, resized_width = smart_resize(
            height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels
        )
        grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
        return grid_h * grid_w

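As a worked example of the patch-count arithmetic (assuming, hypothetically, `patch_size=14` and `merge_size=2`, and an input whose pixel area already falls inside the `shortest_edge`/`longest_edge` budget, so that `smart_resize` only rounds each side to a multiple of `factor = 28`): an 896 x 1120 image keeps its size because both sides are already multiples of 28, giving `grid_h = 896 // 14 = 64`, `grid_w = 1120 // 14 = 80`, and `64 * 80 = 5120` patches; the spatial merge implied by `merge_size` would then reduce this to `5120 / 4 = 1280` image tokens on the LLM side.

# Hypothetical numbers; in practice patch_size and merge_size come from the checkpoint config.
patch_size, merge_size = 14, 2
grid_h, grid_w = 896 // patch_size, 1120 // patch_size
print(grid_h * grid_w)                     # 5120 patches
print(grid_h * grid_w // merge_size**2)    # 1280 tokens after 2x2 merging
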
__all__ = ["Ernie4_5_VL_MoeImageProcessor"]