transformers 5.0.0rc1-py3-none-any.whl → 5.0.0rc3-py3-none-any.whl

This diff shows the content changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
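
For readers who want to reproduce a comparison like this locally, below is a minimal sketch that lists the files added, removed, or modified between the two wheels. It assumes both wheels have already been downloaded (for example with `pip download transformers==5.0.0rc1 --no-deps -d wheels/` and the same for 5.0.0rc3); the `wheels/...` paths are placeholders rather than the registry's own artifacts, and the sketch compares uncompressed file sizes rather than computing the per-file +/- line counts shown in the listing below.

```python
# Minimal sketch: file-level comparison of two wheel archives.
# Assumes both wheels were fetched beforehand, e.g.:
#   pip download transformers==5.0.0rc1 --no-deps -d wheels/
#   pip download transformers==5.0.0rc3 --no-deps -d wheels/
# The paths below are placeholders, not the registry's artifacts.
import zipfile

OLD = "wheels/transformers-5.0.0rc1-py3-none-any.whl"
NEW = "wheels/transformers-5.0.0rc3-py3-none-any.whl"


def file_sizes(wheel_path):
    """Map each file inside the wheel to its uncompressed size in bytes."""
    with zipfile.ZipFile(wheel_path) as zf:
        return {info.filename: info.file_size for info in zf.infolist()}


old, new = file_sizes(OLD), file_sizes(NEW)

added = sorted(set(new) - set(old))
removed = sorted(set(old) - set(new))
changed = sorted(name for name in set(old) & set(new) if old[name] != new[name])

print(f"Files changed ({len(added) + len(removed) + len(changed)})")
for name in added:
    print(f"  added     {name}")
for name in removed:
    print(f"  removed   {name}")
for name in changed:
    print(f"  modified  {name} ({old[name]} -> {new[name]} bytes)")
```

A full line-level report like the one below would additionally extract both archives and run `difflib.unified_diff` over each pair of matching files to count added and removed lines.
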
Files changed (1584)
  1. transformers/__init__.py +27 -27
  2. transformers/activations.py +1 -1
  3. transformers/audio_utils.py +32 -33
  4. transformers/cache_utils.py +32 -139
  5. transformers/cli/chat.py +3 -3
  6. transformers/cli/serve.py +2 -2
  7. transformers/cli/transformers.py +2 -1
  8. transformers/configuration_utils.py +143 -101
  9. transformers/conversion_mapping.py +73 -6
  10. transformers/convert_slow_tokenizer.py +3 -8
  11. transformers/core_model_loading.py +215 -50
  12. transformers/data/processors/glue.py +0 -1
  13. transformers/data/processors/utils.py +0 -1
  14. transformers/data/processors/xnli.py +0 -1
  15. transformers/dependency_versions_table.py +5 -5
  16. transformers/distributed/configuration_utils.py +1 -2
  17. transformers/dynamic_module_utils.py +23 -23
  18. transformers/feature_extraction_sequence_utils.py +19 -23
  19. transformers/feature_extraction_utils.py +63 -31
  20. transformers/generation/candidate_generator.py +80 -33
  21. transformers/generation/configuration_utils.py +186 -131
  22. transformers/generation/continuous_batching/__init__.py +0 -1
  23. transformers/generation/continuous_batching/cache.py +81 -24
  24. transformers/generation/continuous_batching/cache_manager.py +155 -45
  25. transformers/generation/continuous_batching/continuous_api.py +152 -84
  26. transformers/generation/continuous_batching/requests.py +51 -3
  27. transformers/generation/continuous_batching/scheduler.py +127 -52
  28. transformers/generation/logits_process.py +0 -128
  29. transformers/generation/stopping_criteria.py +1 -1
  30. transformers/generation/streamers.py +0 -1
  31. transformers/generation/utils.py +107 -119
  32. transformers/generation/watermarking.py +8 -6
  33. transformers/hf_argparser.py +9 -13
  34. transformers/hyperparameter_search.py +1 -2
  35. transformers/image_processing_base.py +11 -21
  36. transformers/image_processing_utils.py +11 -12
  37. transformers/image_processing_utils_fast.py +68 -57
  38. transformers/image_transforms.py +29 -29
  39. transformers/image_utils.py +30 -32
  40. transformers/initialization.py +37 -0
  41. transformers/integrations/__init__.py +12 -0
  42. transformers/integrations/accelerate.py +44 -111
  43. transformers/integrations/aqlm.py +3 -5
  44. transformers/integrations/awq.py +3 -8
  45. transformers/integrations/bitnet.py +5 -8
  46. transformers/integrations/bitsandbytes.py +16 -15
  47. transformers/integrations/deepspeed.py +19 -4
  48. transformers/integrations/eetq.py +3 -6
  49. transformers/integrations/fbgemm_fp8.py +2 -3
  50. transformers/integrations/finegrained_fp8.py +14 -23
  51. transformers/integrations/flash_attention.py +2 -2
  52. transformers/integrations/flex_attention.py +1 -1
  53. transformers/integrations/fp_quant.py +4 -6
  54. transformers/integrations/ggml.py +0 -1
  55. transformers/integrations/higgs.py +2 -5
  56. transformers/integrations/hub_kernels.py +23 -5
  57. transformers/integrations/integration_utils.py +37 -3
  58. transformers/integrations/mistral.py +12 -0
  59. transformers/integrations/moe.py +240 -0
  60. transformers/integrations/mxfp4.py +9 -16
  61. transformers/integrations/peft.py +5 -0
  62. transformers/integrations/quanto.py +5 -2
  63. transformers/integrations/quark.py +2 -4
  64. transformers/integrations/spqr.py +3 -5
  65. transformers/integrations/tensor_parallel.py +167 -221
  66. transformers/integrations/torchao.py +4 -6
  67. transformers/integrations/vptq.py +3 -5
  68. transformers/loss/loss_lw_detr.py +356 -0
  69. transformers/loss/loss_utils.py +2 -0
  70. transformers/masking_utils.py +47 -51
  71. transformers/model_debugging_utils.py +4 -5
  72. transformers/modelcard.py +14 -192
  73. transformers/modeling_attn_mask_utils.py +19 -19
  74. transformers/modeling_flash_attention_utils.py +27 -27
  75. transformers/modeling_gguf_pytorch_utils.py +71 -24
  76. transformers/modeling_layers.py +21 -22
  77. transformers/modeling_outputs.py +242 -253
  78. transformers/modeling_rope_utils.py +110 -113
  79. transformers/modeling_utils.py +633 -576
  80. transformers/models/__init__.py +23 -0
  81. transformers/models/afmoe/configuration_afmoe.py +26 -29
  82. transformers/models/afmoe/modeling_afmoe.py +37 -49
  83. transformers/models/afmoe/modular_afmoe.py +21 -31
  84. transformers/models/aimv2/configuration_aimv2.py +2 -5
  85. transformers/models/aimv2/modeling_aimv2.py +24 -21
  86. transformers/models/aimv2/modular_aimv2.py +11 -9
  87. transformers/models/albert/configuration_albert.py +0 -1
  88. transformers/models/albert/modeling_albert.py +70 -69
  89. transformers/models/albert/tokenization_albert.py +1 -4
  90. transformers/models/align/configuration_align.py +0 -1
  91. transformers/models/align/modeling_align.py +73 -68
  92. transformers/models/align/processing_align.py +2 -30
  93. transformers/models/altclip/configuration_altclip.py +0 -1
  94. transformers/models/altclip/modeling_altclip.py +83 -80
  95. transformers/models/altclip/processing_altclip.py +2 -15
  96. transformers/models/apertus/__init__.py +0 -1
  97. transformers/models/apertus/configuration_apertus.py +18 -21
  98. transformers/models/apertus/modeling_apertus.py +35 -36
  99. transformers/models/apertus/modular_apertus.py +32 -31
  100. transformers/models/arcee/configuration_arcee.py +20 -23
  101. transformers/models/arcee/modeling_arcee.py +32 -35
  102. transformers/models/arcee/modular_arcee.py +20 -23
  103. transformers/models/aria/configuration_aria.py +20 -23
  104. transformers/models/aria/image_processing_aria.py +25 -27
  105. transformers/models/aria/modeling_aria.py +71 -70
  106. transformers/models/aria/modular_aria.py +85 -88
  107. transformers/models/aria/processing_aria.py +28 -35
  108. transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +0 -1
  109. transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py +3 -6
  110. transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +6 -8
  111. transformers/models/audioflamingo3/__init__.py +0 -1
  112. transformers/models/audioflamingo3/configuration_audioflamingo3.py +0 -1
  113. transformers/models/audioflamingo3/modeling_audioflamingo3.py +22 -23
  114. transformers/models/audioflamingo3/modular_audioflamingo3.py +12 -17
  115. transformers/models/audioflamingo3/processing_audioflamingo3.py +33 -30
  116. transformers/models/auto/auto_factory.py +5 -6
  117. transformers/models/auto/configuration_auto.py +53 -5
  118. transformers/models/auto/feature_extraction_auto.py +12 -10
  119. transformers/models/auto/image_processing_auto.py +17 -28
  120. transformers/models/auto/modeling_auto.py +38 -188
  121. transformers/models/auto/processing_auto.py +6 -1
  122. transformers/models/auto/tokenization_auto.py +147 -169
  123. transformers/models/auto/video_processing_auto.py +12 -10
  124. transformers/models/autoformer/configuration_autoformer.py +4 -7
  125. transformers/models/autoformer/modeling_autoformer.py +98 -100
  126. transformers/models/aya_vision/configuration_aya_vision.py +0 -1
  127. transformers/models/aya_vision/modeling_aya_vision.py +42 -40
  128. transformers/models/aya_vision/modular_aya_vision.py +26 -29
  129. transformers/models/aya_vision/processing_aya_vision.py +25 -53
  130. transformers/models/bamba/configuration_bamba.py +29 -32
  131. transformers/models/bamba/modeling_bamba.py +78 -83
  132. transformers/models/bamba/modular_bamba.py +68 -71
  133. transformers/models/bark/configuration_bark.py +4 -7
  134. transformers/models/bark/generation_configuration_bark.py +3 -5
  135. transformers/models/bark/modeling_bark.py +49 -55
  136. transformers/models/bark/processing_bark.py +19 -41
  137. transformers/models/bart/configuration_bart.py +0 -2
  138. transformers/models/bart/modeling_bart.py +122 -117
  139. transformers/models/barthez/tokenization_barthez.py +1 -4
  140. transformers/models/bartpho/tokenization_bartpho.py +6 -7
  141. transformers/models/beit/configuration_beit.py +0 -11
  142. transformers/models/beit/image_processing_beit.py +53 -56
  143. transformers/models/beit/image_processing_beit_fast.py +8 -10
  144. transformers/models/beit/modeling_beit.py +51 -53
  145. transformers/models/bert/configuration_bert.py +0 -1
  146. transformers/models/bert/modeling_bert.py +114 -122
  147. transformers/models/bert/tokenization_bert.py +2 -4
  148. transformers/models/bert/tokenization_bert_legacy.py +3 -5
  149. transformers/models/bert_generation/configuration_bert_generation.py +0 -1
  150. transformers/models/bert_generation/modeling_bert_generation.py +49 -49
  151. transformers/models/bert_generation/tokenization_bert_generation.py +2 -3
  152. transformers/models/bert_japanese/tokenization_bert_japanese.py +5 -6
  153. transformers/models/bertweet/tokenization_bertweet.py +1 -3
  154. transformers/models/big_bird/configuration_big_bird.py +0 -1
  155. transformers/models/big_bird/modeling_big_bird.py +110 -109
  156. transformers/models/big_bird/tokenization_big_bird.py +1 -4
  157. transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +0 -1
  158. transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +116 -111
  159. transformers/models/biogpt/configuration_biogpt.py +0 -1
  160. transformers/models/biogpt/modeling_biogpt.py +69 -71
  161. transformers/models/biogpt/modular_biogpt.py +59 -61
  162. transformers/models/biogpt/tokenization_biogpt.py +3 -5
  163. transformers/models/bit/configuration_bit.py +0 -1
  164. transformers/models/bit/image_processing_bit.py +21 -24
  165. transformers/models/bit/image_processing_bit_fast.py +0 -1
  166. transformers/models/bit/modeling_bit.py +14 -12
  167. transformers/models/bitnet/configuration_bitnet.py +18 -21
  168. transformers/models/bitnet/modeling_bitnet.py +32 -35
  169. transformers/models/bitnet/modular_bitnet.py +4 -6
  170. transformers/models/blenderbot/configuration_blenderbot.py +0 -1
  171. transformers/models/blenderbot/modeling_blenderbot.py +71 -95
  172. transformers/models/blenderbot/tokenization_blenderbot.py +6 -8
  173. transformers/models/blenderbot_small/configuration_blenderbot_small.py +0 -1
  174. transformers/models/blenderbot_small/modeling_blenderbot_small.py +73 -68
  175. transformers/models/blenderbot_small/tokenization_blenderbot_small.py +1 -3
  176. transformers/models/blip/configuration_blip.py +0 -1
  177. transformers/models/blip/image_processing_blip.py +17 -20
  178. transformers/models/blip/image_processing_blip_fast.py +0 -1
  179. transformers/models/blip/modeling_blip.py +62 -71
  180. transformers/models/blip/modeling_blip_text.py +71 -65
  181. transformers/models/blip/processing_blip.py +5 -36
  182. transformers/models/blip_2/configuration_blip_2.py +0 -1
  183. transformers/models/blip_2/modeling_blip_2.py +72 -71
  184. transformers/models/blip_2/processing_blip_2.py +8 -38
  185. transformers/models/bloom/configuration_bloom.py +0 -1
  186. transformers/models/bloom/modeling_bloom.py +71 -103
  187. transformers/models/blt/configuration_blt.py +71 -74
  188. transformers/models/blt/modeling_blt.py +235 -78
  189. transformers/models/blt/modular_blt.py +225 -62
  190. transformers/models/bridgetower/configuration_bridgetower.py +0 -1
  191. transformers/models/bridgetower/image_processing_bridgetower.py +34 -35
  192. transformers/models/bridgetower/image_processing_bridgetower_fast.py +7 -10
  193. transformers/models/bridgetower/modeling_bridgetower.py +113 -109
  194. transformers/models/bridgetower/processing_bridgetower.py +2 -16
  195. transformers/models/bros/configuration_bros.py +0 -1
  196. transformers/models/bros/modeling_bros.py +86 -80
  197. transformers/models/bros/processing_bros.py +2 -12
  198. transformers/models/byt5/tokenization_byt5.py +4 -6
  199. transformers/models/camembert/configuration_camembert.py +0 -1
  200. transformers/models/camembert/modeling_camembert.py +196 -195
  201. transformers/models/camembert/modular_camembert.py +51 -54
  202. transformers/models/camembert/tokenization_camembert.py +1 -4
  203. transformers/models/canine/configuration_canine.py +0 -1
  204. transformers/models/canine/modeling_canine.py +79 -75
  205. transformers/models/canine/tokenization_canine.py +2 -1
  206. transformers/models/chameleon/configuration_chameleon.py +24 -27
  207. transformers/models/chameleon/image_processing_chameleon.py +21 -24
  208. transformers/models/chameleon/image_processing_chameleon_fast.py +0 -1
  209. transformers/models/chameleon/modeling_chameleon.py +62 -60
  210. transformers/models/chameleon/processing_chameleon.py +16 -41
  211. transformers/models/chinese_clip/configuration_chinese_clip.py +0 -1
  212. transformers/models/chinese_clip/image_processing_chinese_clip.py +21 -24
  213. transformers/models/chinese_clip/image_processing_chinese_clip_fast.py +0 -1
  214. transformers/models/chinese_clip/modeling_chinese_clip.py +71 -69
  215. transformers/models/chinese_clip/processing_chinese_clip.py +2 -15
  216. transformers/models/clap/configuration_clap.py +0 -1
  217. transformers/models/clap/feature_extraction_clap.py +11 -12
  218. transformers/models/clap/modeling_clap.py +113 -104
  219. transformers/models/clap/processing_clap.py +2 -15
  220. transformers/models/clip/configuration_clip.py +0 -1
  221. transformers/models/clip/image_processing_clip.py +21 -24
  222. transformers/models/clip/image_processing_clip_fast.py +0 -1
  223. transformers/models/clip/modeling_clip.py +47 -46
  224. transformers/models/clip/processing_clip.py +2 -14
  225. transformers/models/clip/tokenization_clip.py +2 -5
  226. transformers/models/clipseg/configuration_clipseg.py +0 -1
  227. transformers/models/clipseg/modeling_clipseg.py +90 -87
  228. transformers/models/clipseg/processing_clipseg.py +8 -39
  229. transformers/models/clvp/configuration_clvp.py +1 -3
  230. transformers/models/clvp/feature_extraction_clvp.py +7 -10
  231. transformers/models/clvp/modeling_clvp.py +133 -118
  232. transformers/models/clvp/number_normalizer.py +1 -2
  233. transformers/models/clvp/processing_clvp.py +3 -20
  234. transformers/models/clvp/tokenization_clvp.py +0 -1
  235. transformers/models/code_llama/tokenization_code_llama.py +4 -7
  236. transformers/models/codegen/configuration_codegen.py +0 -1
  237. transformers/models/codegen/modeling_codegen.py +61 -52
  238. transformers/models/codegen/tokenization_codegen.py +5 -6
  239. transformers/models/cohere/configuration_cohere.py +20 -23
  240. transformers/models/cohere/modeling_cohere.py +36 -39
  241. transformers/models/cohere/modular_cohere.py +24 -28
  242. transformers/models/cohere/tokenization_cohere.py +5 -6
  243. transformers/models/cohere2/configuration_cohere2.py +21 -24
  244. transformers/models/cohere2/modeling_cohere2.py +35 -38
  245. transformers/models/cohere2/modular_cohere2.py +39 -41
  246. transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +6 -8
  247. transformers/models/cohere2_vision/modeling_cohere2_vision.py +35 -33
  248. transformers/models/cohere2_vision/modular_cohere2_vision.py +21 -23
  249. transformers/models/cohere2_vision/processing_cohere2_vision.py +6 -36
  250. transformers/models/colpali/configuration_colpali.py +0 -1
  251. transformers/models/colpali/modeling_colpali.py +14 -16
  252. transformers/models/colpali/modular_colpali.py +11 -51
  253. transformers/models/colpali/processing_colpali.py +14 -52
  254. transformers/models/colqwen2/modeling_colqwen2.py +20 -22
  255. transformers/models/colqwen2/modular_colqwen2.py +29 -68
  256. transformers/models/colqwen2/processing_colqwen2.py +16 -52
  257. transformers/models/conditional_detr/configuration_conditional_detr.py +1 -2
  258. transformers/models/conditional_detr/image_processing_conditional_detr.py +64 -66
  259. transformers/models/conditional_detr/image_processing_conditional_detr_fast.py +22 -22
  260. transformers/models/conditional_detr/modeling_conditional_detr.py +82 -81
  261. transformers/models/conditional_detr/modular_conditional_detr.py +1 -3
  262. transformers/models/convbert/configuration_convbert.py +0 -1
  263. transformers/models/convbert/modeling_convbert.py +88 -87
  264. transformers/models/convbert/tokenization_convbert.py +0 -1
  265. transformers/models/convnext/configuration_convnext.py +0 -1
  266. transformers/models/convnext/image_processing_convnext.py +20 -23
  267. transformers/models/convnext/image_processing_convnext_fast.py +14 -19
  268. transformers/models/convnext/modeling_convnext.py +5 -8
  269. transformers/models/convnextv2/configuration_convnextv2.py +0 -1
  270. transformers/models/convnextv2/modeling_convnextv2.py +5 -8
  271. transformers/models/cpm/tokenization_cpm.py +6 -7
  272. transformers/models/cpm/tokenization_cpm_fast.py +3 -5
  273. transformers/models/cpmant/configuration_cpmant.py +0 -1
  274. transformers/models/cpmant/modeling_cpmant.py +38 -40
  275. transformers/models/cpmant/tokenization_cpmant.py +1 -3
  276. transformers/models/csm/configuration_csm.py +49 -51
  277. transformers/models/csm/generation_csm.py +31 -35
  278. transformers/models/csm/modeling_csm.py +81 -82
  279. transformers/models/csm/modular_csm.py +58 -58
  280. transformers/models/csm/processing_csm.py +25 -68
  281. transformers/models/ctrl/configuration_ctrl.py +0 -1
  282. transformers/models/ctrl/modeling_ctrl.py +52 -43
  283. transformers/models/ctrl/tokenization_ctrl.py +0 -1
  284. transformers/models/cvt/configuration_cvt.py +0 -1
  285. transformers/models/cvt/modeling_cvt.py +18 -16
  286. transformers/models/cwm/__init__.py +0 -1
  287. transformers/models/cwm/configuration_cwm.py +3 -5
  288. transformers/models/cwm/modeling_cwm.py +33 -35
  289. transformers/models/cwm/modular_cwm.py +10 -12
  290. transformers/models/d_fine/configuration_d_fine.py +3 -5
  291. transformers/models/d_fine/modeling_d_fine.py +127 -121
  292. transformers/models/d_fine/modular_d_fine.py +23 -13
  293. transformers/models/dab_detr/configuration_dab_detr.py +2 -3
  294. transformers/models/dab_detr/modeling_dab_detr.py +69 -71
  295. transformers/models/dac/configuration_dac.py +0 -1
  296. transformers/models/dac/feature_extraction_dac.py +6 -9
  297. transformers/models/dac/modeling_dac.py +21 -23
  298. transformers/models/data2vec/configuration_data2vec_audio.py +0 -1
  299. transformers/models/data2vec/configuration_data2vec_text.py +0 -1
  300. transformers/models/data2vec/configuration_data2vec_vision.py +0 -1
  301. transformers/models/data2vec/modeling_data2vec_audio.py +52 -56
  302. transformers/models/data2vec/modeling_data2vec_text.py +98 -93
  303. transformers/models/data2vec/modeling_data2vec_vision.py +41 -42
  304. transformers/models/data2vec/modular_data2vec_audio.py +6 -1
  305. transformers/models/data2vec/modular_data2vec_text.py +58 -54
  306. transformers/models/dbrx/configuration_dbrx.py +27 -20
  307. transformers/models/dbrx/modeling_dbrx.py +40 -43
  308. transformers/models/dbrx/modular_dbrx.py +31 -33
  309. transformers/models/deberta/configuration_deberta.py +0 -1
  310. transformers/models/deberta/modeling_deberta.py +59 -60
  311. transformers/models/deberta/tokenization_deberta.py +2 -5
  312. transformers/models/deberta_v2/configuration_deberta_v2.py +0 -1
  313. transformers/models/deberta_v2/modeling_deberta_v2.py +65 -65
  314. transformers/models/deberta_v2/tokenization_deberta_v2.py +1 -4
  315. transformers/models/decision_transformer/configuration_decision_transformer.py +0 -1
  316. transformers/models/decision_transformer/modeling_decision_transformer.py +56 -55
  317. transformers/models/deepseek_v2/configuration_deepseek_v2.py +34 -37
  318. transformers/models/deepseek_v2/modeling_deepseek_v2.py +39 -37
  319. transformers/models/deepseek_v2/modular_deepseek_v2.py +44 -44
  320. transformers/models/deepseek_v3/configuration_deepseek_v3.py +35 -38
  321. transformers/models/deepseek_v3/modeling_deepseek_v3.py +40 -38
  322. transformers/models/deepseek_v3/modular_deepseek_v3.py +10 -7
  323. transformers/models/deepseek_vl/configuration_deepseek_vl.py +2 -3
  324. transformers/models/deepseek_vl/image_processing_deepseek_vl.py +25 -26
  325. transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py +7 -7
  326. transformers/models/deepseek_vl/modeling_deepseek_vl.py +40 -36
  327. transformers/models/deepseek_vl/modular_deepseek_vl.py +14 -43
  328. transformers/models/deepseek_vl/processing_deepseek_vl.py +10 -41
  329. transformers/models/deepseek_vl_hybrid/configuration_deepseek_vl_hybrid.py +3 -5
  330. transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py +35 -35
  331. transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py +16 -20
  332. transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py +42 -38
  333. transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py +80 -99
  334. transformers/models/deepseek_vl_hybrid/processing_deepseek_vl_hybrid.py +12 -44
  335. transformers/models/deformable_detr/configuration_deformable_detr.py +2 -3
  336. transformers/models/deformable_detr/image_processing_deformable_detr.py +59 -61
  337. transformers/models/deformable_detr/image_processing_deformable_detr_fast.py +17 -17
  338. transformers/models/deformable_detr/modeling_deformable_detr.py +67 -68
  339. transformers/models/deformable_detr/modular_deformable_detr.py +1 -3
  340. transformers/models/deit/configuration_deit.py +0 -1
  341. transformers/models/deit/image_processing_deit.py +18 -21
  342. transformers/models/deit/image_processing_deit_fast.py +0 -1
  343. transformers/models/deit/modeling_deit.py +16 -18
  344. transformers/models/depth_anything/configuration_depth_anything.py +2 -4
  345. transformers/models/depth_anything/modeling_depth_anything.py +5 -8
  346. transformers/models/depth_pro/configuration_depth_pro.py +0 -1
  347. transformers/models/depth_pro/image_processing_depth_pro.py +22 -23
  348. transformers/models/depth_pro/image_processing_depth_pro_fast.py +6 -8
  349. transformers/models/depth_pro/modeling_depth_pro.py +21 -23
  350. transformers/models/detr/configuration_detr.py +1 -2
  351. transformers/models/detr/image_processing_detr.py +64 -66
  352. transformers/models/detr/image_processing_detr_fast.py +22 -23
  353. transformers/models/detr/modeling_detr.py +78 -73
  354. transformers/models/dia/configuration_dia.py +5 -8
  355. transformers/models/dia/feature_extraction_dia.py +6 -9
  356. transformers/models/dia/generation_dia.py +42 -45
  357. transformers/models/dia/modeling_dia.py +73 -65
  358. transformers/models/dia/modular_dia.py +63 -54
  359. transformers/models/dia/processing_dia.py +39 -29
  360. transformers/models/dia/tokenization_dia.py +3 -6
  361. transformers/models/diffllama/configuration_diffllama.py +20 -23
  362. transformers/models/diffllama/modeling_diffllama.py +44 -47
  363. transformers/models/diffllama/modular_diffllama.py +17 -19
  364. transformers/models/dinat/configuration_dinat.py +0 -1
  365. transformers/models/dinat/modeling_dinat.py +40 -42
  366. transformers/models/dinov2/configuration_dinov2.py +0 -1
  367. transformers/models/dinov2/modeling_dinov2.py +11 -13
  368. transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py +1 -1
  369. transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py +12 -13
  370. transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py +5 -7
  371. transformers/models/dinov3_convnext/configuration_dinov3_convnext.py +4 -7
  372. transformers/models/dinov3_convnext/modeling_dinov3_convnext.py +3 -6
  373. transformers/models/dinov3_vit/configuration_dinov3_vit.py +5 -8
  374. transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py +5 -7
  375. transformers/models/dinov3_vit/modeling_dinov3_vit.py +17 -16
  376. transformers/models/dinov3_vit/modular_dinov3_vit.py +14 -13
  377. transformers/models/distilbert/configuration_distilbert.py +0 -1
  378. transformers/models/distilbert/modeling_distilbert.py +55 -55
  379. transformers/models/distilbert/tokenization_distilbert.py +0 -1
  380. transformers/models/doge/__init__.py +0 -1
  381. transformers/models/doge/configuration_doge.py +25 -28
  382. transformers/models/doge/modeling_doge.py +43 -46
  383. transformers/models/doge/modular_doge.py +57 -58
  384. transformers/models/donut/configuration_donut_swin.py +0 -1
  385. transformers/models/donut/image_processing_donut.py +26 -29
  386. transformers/models/donut/image_processing_donut_fast.py +5 -11
  387. transformers/models/donut/modeling_donut_swin.py +60 -58
  388. transformers/models/donut/processing_donut.py +5 -26
  389. transformers/models/dots1/configuration_dots1.py +27 -29
  390. transformers/models/dots1/modeling_dots1.py +45 -39
  391. transformers/models/dots1/modular_dots1.py +0 -1
  392. transformers/models/dpr/configuration_dpr.py +0 -1
  393. transformers/models/dpr/modeling_dpr.py +37 -39
  394. transformers/models/dpr/tokenization_dpr.py +7 -9
  395. transformers/models/dpr/tokenization_dpr_fast.py +7 -9
  396. transformers/models/dpt/configuration_dpt.py +1 -2
  397. transformers/models/dpt/image_processing_dpt.py +65 -66
  398. transformers/models/dpt/image_processing_dpt_fast.py +14 -16
  399. transformers/models/dpt/modeling_dpt.py +19 -21
  400. transformers/models/dpt/modular_dpt.py +11 -13
  401. transformers/models/edgetam/configuration_edgetam.py +1 -2
  402. transformers/models/edgetam/modeling_edgetam.py +44 -43
  403. transformers/models/edgetam/modular_edgetam.py +17 -20
  404. transformers/models/edgetam_video/__init__.py +0 -1
  405. transformers/models/edgetam_video/configuration_edgetam_video.py +0 -1
  406. transformers/models/edgetam_video/modeling_edgetam_video.py +131 -120
  407. transformers/models/edgetam_video/modular_edgetam_video.py +29 -37
  408. transformers/models/efficientloftr/configuration_efficientloftr.py +4 -5
  409. transformers/models/efficientloftr/image_processing_efficientloftr.py +14 -16
  410. transformers/models/efficientloftr/image_processing_efficientloftr_fast.py +5 -6
  411. transformers/models/efficientloftr/modeling_efficientloftr.py +41 -30
  412. transformers/models/efficientloftr/modular_efficientloftr.py +1 -3
  413. transformers/models/efficientnet/configuration_efficientnet.py +0 -1
  414. transformers/models/efficientnet/image_processing_efficientnet.py +28 -32
  415. transformers/models/efficientnet/image_processing_efficientnet_fast.py +15 -17
  416. transformers/models/efficientnet/modeling_efficientnet.py +17 -15
  417. transformers/models/electra/configuration_electra.py +0 -1
  418. transformers/models/electra/modeling_electra.py +108 -103
  419. transformers/models/emu3/configuration_emu3.py +5 -7
  420. transformers/models/emu3/image_processing_emu3.py +44 -39
  421. transformers/models/emu3/modeling_emu3.py +67 -64
  422. transformers/models/emu3/modular_emu3.py +39 -35
  423. transformers/models/emu3/processing_emu3.py +18 -43
  424. transformers/models/encodec/configuration_encodec.py +2 -4
  425. transformers/models/encodec/feature_extraction_encodec.py +10 -13
  426. transformers/models/encodec/modeling_encodec.py +39 -29
  427. transformers/models/encoder_decoder/configuration_encoder_decoder.py +0 -1
  428. transformers/models/encoder_decoder/modeling_encoder_decoder.py +17 -19
  429. transformers/models/eomt/configuration_eomt.py +0 -1
  430. transformers/models/eomt/image_processing_eomt.py +53 -55
  431. transformers/models/eomt/image_processing_eomt_fast.py +59 -28
  432. transformers/models/eomt/modeling_eomt.py +23 -18
  433. transformers/models/eomt/modular_eomt.py +18 -13
  434. transformers/models/ernie/configuration_ernie.py +0 -1
  435. transformers/models/ernie/modeling_ernie.py +127 -132
  436. transformers/models/ernie/modular_ernie.py +97 -103
  437. transformers/models/ernie4_5/configuration_ernie4_5.py +18 -20
  438. transformers/models/ernie4_5/modeling_ernie4_5.py +32 -34
  439. transformers/models/ernie4_5/modular_ernie4_5.py +1 -3
  440. transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py +27 -29
  441. transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +52 -51
  442. transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py +16 -44
  443. transformers/models/ernie4_5_vl_moe/__init__.py +31 -0
  444. transformers/models/ernie4_5_vl_moe/configuration_ernie4_5_vl_moe.py +329 -0
  445. transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe.py +455 -0
  446. transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe_fast.py +231 -0
  447. transformers/models/ernie4_5_vl_moe/modeling_ernie4_5_vl_moe.py +1895 -0
  448. transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py +1901 -0
  449. transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py +249 -0
  450. transformers/models/ernie4_5_vl_moe/video_processing_ernie4_5_vl_moe.py +593 -0
  451. transformers/models/esm/configuration_esm.py +2 -4
  452. transformers/models/esm/modeling_esm.py +38 -34
  453. transformers/models/esm/modeling_esmfold.py +48 -45
  454. transformers/models/esm/openfold_utils/chunk_utils.py +6 -6
  455. transformers/models/esm/openfold_utils/loss.py +1 -2
  456. transformers/models/esm/openfold_utils/protein.py +13 -13
  457. transformers/models/esm/openfold_utils/tensor_utils.py +6 -6
  458. transformers/models/esm/tokenization_esm.py +2 -4
  459. transformers/models/evolla/configuration_evolla.py +29 -32
  460. transformers/models/evolla/modeling_evolla.py +67 -62
  461. transformers/models/evolla/modular_evolla.py +53 -47
  462. transformers/models/evolla/processing_evolla.py +23 -35
  463. transformers/models/exaone4/configuration_exaone4.py +19 -22
  464. transformers/models/exaone4/modeling_exaone4.py +33 -36
  465. transformers/models/exaone4/modular_exaone4.py +40 -42
  466. transformers/models/falcon/configuration_falcon.py +22 -25
  467. transformers/models/falcon/modeling_falcon.py +75 -78
  468. transformers/models/falcon_h1/configuration_falcon_h1.py +40 -43
  469. transformers/models/falcon_h1/modeling_falcon_h1.py +80 -78
  470. transformers/models/falcon_h1/modular_falcon_h1.py +54 -50
  471. transformers/models/falcon_mamba/configuration_falcon_mamba.py +0 -1
  472. transformers/models/falcon_mamba/modeling_falcon_mamba.py +50 -47
  473. transformers/models/falcon_mamba/modular_falcon_mamba.py +16 -14
  474. transformers/models/fast_vlm/configuration_fast_vlm.py +1 -0
  475. transformers/models/fast_vlm/modeling_fast_vlm.py +43 -39
  476. transformers/models/fast_vlm/modular_fast_vlm.py +2 -3
  477. transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +2 -5
  478. transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +68 -57
  479. transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +2 -3
  480. transformers/models/flaubert/configuration_flaubert.py +0 -1
  481. transformers/models/flaubert/modeling_flaubert.py +138 -143
  482. transformers/models/flaubert/tokenization_flaubert.py +3 -5
  483. transformers/models/flava/configuration_flava.py +5 -6
  484. transformers/models/flava/image_processing_flava.py +66 -67
  485. transformers/models/flava/image_processing_flava_fast.py +42 -45
  486. transformers/models/flava/modeling_flava.py +111 -107
  487. transformers/models/flava/processing_flava.py +2 -12
  488. transformers/models/flex_olmo/__init__.py +0 -1
  489. transformers/models/flex_olmo/configuration_flex_olmo.py +23 -25
  490. transformers/models/flex_olmo/modeling_flex_olmo.py +44 -43
  491. transformers/models/flex_olmo/modular_flex_olmo.py +35 -37
  492. transformers/models/florence2/configuration_florence2.py +0 -1
  493. transformers/models/florence2/modeling_florence2.py +59 -43
  494. transformers/models/florence2/modular_florence2.py +65 -81
  495. transformers/models/florence2/processing_florence2.py +18 -47
  496. transformers/models/fnet/configuration_fnet.py +0 -1
  497. transformers/models/fnet/modeling_fnet.py +76 -80
  498. transformers/models/fnet/tokenization_fnet.py +0 -1
  499. transformers/models/focalnet/configuration_focalnet.py +0 -1
  500. transformers/models/focalnet/modeling_focalnet.py +39 -41
  501. transformers/models/fsmt/configuration_fsmt.py +0 -1
  502. transformers/models/fsmt/modeling_fsmt.py +47 -48
  503. transformers/models/fsmt/tokenization_fsmt.py +3 -5
  504. transformers/models/funnel/configuration_funnel.py +0 -1
  505. transformers/models/funnel/modeling_funnel.py +91 -93
  506. transformers/models/funnel/tokenization_funnel.py +2 -5
  507. transformers/models/fuyu/configuration_fuyu.py +23 -26
  508. transformers/models/fuyu/image_processing_fuyu.py +29 -31
  509. transformers/models/fuyu/image_processing_fuyu_fast.py +12 -13
  510. transformers/models/fuyu/modeling_fuyu.py +29 -30
  511. transformers/models/fuyu/processing_fuyu.py +23 -34
  512. transformers/models/gemma/configuration_gemma.py +20 -23
  513. transformers/models/gemma/modeling_gemma.py +42 -46
  514. transformers/models/gemma/modular_gemma.py +37 -40
  515. transformers/models/gemma/tokenization_gemma.py +3 -6
  516. transformers/models/gemma2/configuration_gemma2.py +25 -28
  517. transformers/models/gemma2/modeling_gemma2.py +35 -38
  518. transformers/models/gemma2/modular_gemma2.py +56 -58
  519. transformers/models/gemma3/configuration_gemma3.py +28 -29
  520. transformers/models/gemma3/image_processing_gemma3.py +29 -31
  521. transformers/models/gemma3/image_processing_gemma3_fast.py +9 -11
  522. transformers/models/gemma3/modeling_gemma3.py +112 -94
  523. transformers/models/gemma3/modular_gemma3.py +110 -91
  524. transformers/models/gemma3/processing_gemma3.py +5 -5
  525. transformers/models/gemma3n/configuration_gemma3n.py +12 -10
  526. transformers/models/gemma3n/feature_extraction_gemma3n.py +9 -11
  527. transformers/models/gemma3n/modeling_gemma3n.py +127 -98
  528. transformers/models/gemma3n/modular_gemma3n.py +117 -84
  529. transformers/models/gemma3n/processing_gemma3n.py +12 -26
  530. transformers/models/git/configuration_git.py +0 -1
  531. transformers/models/git/modeling_git.py +250 -197
  532. transformers/models/git/processing_git.py +2 -14
  533. transformers/models/glm/configuration_glm.py +19 -21
  534. transformers/models/glm/modeling_glm.py +33 -36
  535. transformers/models/glm/modular_glm.py +4 -7
  536. transformers/models/glm4/configuration_glm4.py +19 -21
  537. transformers/models/glm4/modeling_glm4.py +36 -38
  538. transformers/models/glm4/modular_glm4.py +8 -10
  539. transformers/models/glm46v/configuration_glm46v.py +0 -1
  540. transformers/models/glm46v/image_processing_glm46v.py +35 -40
  541. transformers/models/glm46v/image_processing_glm46v_fast.py +7 -7
  542. transformers/models/glm46v/modeling_glm46v.py +54 -52
  543. transformers/models/glm46v/modular_glm46v.py +4 -3
  544. transformers/models/glm46v/processing_glm46v.py +7 -41
  545. transformers/models/glm46v/video_processing_glm46v.py +9 -11
  546. transformers/models/glm4_moe/configuration_glm4_moe.py +25 -28
  547. transformers/models/glm4_moe/modeling_glm4_moe.py +41 -40
  548. transformers/models/glm4_moe/modular_glm4_moe.py +27 -30
  549. transformers/models/glm4_moe_lite/__init__.py +28 -0
  550. transformers/models/glm4_moe_lite/configuration_glm4_moe_lite.py +235 -0
  551. transformers/models/glm4_moe_lite/modeling_glm4_moe_lite.py +740 -0
  552. transformers/models/glm4_moe_lite/modular_glm4_moe_lite.py +304 -0
  553. transformers/models/glm4v/configuration_glm4v.py +14 -17
  554. transformers/models/glm4v/image_processing_glm4v.py +34 -40
  555. transformers/models/glm4v/image_processing_glm4v_fast.py +6 -7
  556. transformers/models/glm4v/modeling_glm4v.py +148 -156
  557. transformers/models/glm4v/modular_glm4v.py +142 -185
  558. transformers/models/glm4v/processing_glm4v.py +7 -41
  559. transformers/models/glm4v/video_processing_glm4v.py +9 -11
  560. transformers/models/glm4v_moe/configuration_glm4v_moe.py +119 -122
  561. transformers/models/glm4v_moe/modeling_glm4v_moe.py +275 -319
  562. transformers/models/glm4v_moe/modular_glm4v_moe.py +66 -163
  563. transformers/models/glm_image/__init__.py +31 -0
  564. transformers/models/glm_image/configuration_glm_image.py +352 -0
  565. transformers/models/glm_image/image_processing_glm_image.py +503 -0
  566. transformers/models/glm_image/image_processing_glm_image_fast.py +296 -0
  567. transformers/models/glm_image/modeling_glm_image.py +1590 -0
  568. transformers/models/glm_image/modular_glm_image.py +1480 -0
  569. transformers/models/glm_image/processing_glm_image.py +217 -0
  570. transformers/models/glmasr/__init__.py +29 -0
  571. transformers/models/glmasr/configuration_glmasr.py +196 -0
  572. transformers/models/glmasr/modeling_glmasr.py +511 -0
  573. transformers/models/glmasr/modular_glmasr.py +431 -0
  574. transformers/models/glmasr/processing_glmasr.py +331 -0
  575. transformers/models/glpn/configuration_glpn.py +0 -1
  576. transformers/models/glpn/image_processing_glpn.py +11 -12
  577. transformers/models/glpn/image_processing_glpn_fast.py +8 -10
  578. transformers/models/glpn/modeling_glpn.py +10 -12
  579. transformers/models/got_ocr2/configuration_got_ocr2.py +5 -8
  580. transformers/models/got_ocr2/image_processing_got_ocr2.py +22 -24
  581. transformers/models/got_ocr2/image_processing_got_ocr2_fast.py +6 -8
  582. transformers/models/got_ocr2/modeling_got_ocr2.py +48 -45
  583. transformers/models/got_ocr2/modular_got_ocr2.py +31 -34
  584. transformers/models/got_ocr2/processing_got_ocr2.py +42 -63
  585. transformers/models/gpt2/configuration_gpt2.py +0 -1
  586. transformers/models/gpt2/modeling_gpt2.py +114 -113
  587. transformers/models/gpt2/tokenization_gpt2.py +6 -9
  588. transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +0 -1
  589. transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +76 -88
  590. transformers/models/gpt_neo/configuration_gpt_neo.py +0 -1
  591. transformers/models/gpt_neo/modeling_gpt_neo.py +77 -66
  592. transformers/models/gpt_neox/configuration_gpt_neox.py +19 -22
  593. transformers/models/gpt_neox/modeling_gpt_neox.py +71 -73
  594. transformers/models/gpt_neox/modular_gpt_neox.py +64 -66
  595. transformers/models/gpt_neox/tokenization_gpt_neox.py +2 -5
  596. transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +15 -18
  597. transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +42 -45
  598. transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py +1 -3
  599. transformers/models/gpt_oss/configuration_gpt_oss.py +38 -24
  600. transformers/models/gpt_oss/modeling_gpt_oss.py +40 -44
  601. transformers/models/gpt_oss/modular_gpt_oss.py +22 -26
  602. transformers/models/gpt_sw3/tokenization_gpt_sw3.py +4 -4
  603. transformers/models/gptj/configuration_gptj.py +0 -1
  604. transformers/models/gptj/modeling_gptj.py +96 -86
  605. transformers/models/granite/configuration_granite.py +23 -26
  606. transformers/models/granite/modeling_granite.py +40 -42
  607. transformers/models/granite/modular_granite.py +29 -31
  608. transformers/models/granite_speech/configuration_granite_speech.py +0 -1
  609. transformers/models/granite_speech/feature_extraction_granite_speech.py +1 -3
  610. transformers/models/granite_speech/modeling_granite_speech.py +36 -24
  611. transformers/models/granite_speech/processing_granite_speech.py +11 -4
  612. transformers/models/granitemoe/configuration_granitemoe.py +26 -29
  613. transformers/models/granitemoe/modeling_granitemoe.py +37 -40
  614. transformers/models/granitemoe/modular_granitemoe.py +22 -25
  615. transformers/models/granitemoehybrid/__init__.py +0 -1
  616. transformers/models/granitemoehybrid/configuration_granitemoehybrid.py +41 -40
  617. transformers/models/granitemoehybrid/modeling_granitemoehybrid.py +92 -86
  618. transformers/models/granitemoehybrid/modular_granitemoehybrid.py +29 -21
  619. transformers/models/granitemoeshared/configuration_granitemoeshared.py +27 -30
  620. transformers/models/granitemoeshared/modeling_granitemoeshared.py +50 -55
  621. transformers/models/granitemoeshared/modular_granitemoeshared.py +19 -21
  622. transformers/models/grounding_dino/configuration_grounding_dino.py +2 -4
  623. transformers/models/grounding_dino/image_processing_grounding_dino.py +60 -62
  624. transformers/models/grounding_dino/image_processing_grounding_dino_fast.py +17 -18
  625. transformers/models/grounding_dino/modeling_grounding_dino.py +95 -97
  626. transformers/models/grounding_dino/modular_grounding_dino.py +2 -3
  627. transformers/models/grounding_dino/processing_grounding_dino.py +10 -38
  628. transformers/models/groupvit/configuration_groupvit.py +0 -1
  629. transformers/models/groupvit/modeling_groupvit.py +75 -71
  630. transformers/models/helium/configuration_helium.py +20 -22
  631. transformers/models/helium/modeling_helium.py +34 -37
  632. transformers/models/helium/modular_helium.py +3 -7
  633. transformers/models/herbert/tokenization_herbert.py +4 -6
  634. transformers/models/hgnet_v2/configuration_hgnet_v2.py +0 -1
  635. transformers/models/hgnet_v2/modeling_hgnet_v2.py +16 -9
  636. transformers/models/hgnet_v2/modular_hgnet_v2.py +16 -9
  637. transformers/models/hiera/configuration_hiera.py +0 -1
  638. transformers/models/hiera/modeling_hiera.py +60 -62
  639. transformers/models/hubert/configuration_hubert.py +0 -1
  640. transformers/models/hubert/modeling_hubert.py +39 -37
  641. transformers/models/hubert/modular_hubert.py +12 -11
  642. transformers/models/hunyuan_v1_dense/configuration_hunyuan_v1_dense.py +21 -24
  643. transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py +31 -34
  644. transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py +4 -6
  645. transformers/models/hunyuan_v1_moe/__init__.py +1 -1
  646. transformers/models/hunyuan_v1_moe/configuration_hunyuan_v1_moe.py +25 -28
  647. transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py +44 -39
  648. transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py +9 -9
  649. transformers/models/ibert/configuration_ibert.py +0 -1
  650. transformers/models/ibert/modeling_ibert.py +76 -62
  651. transformers/models/ibert/quant_modules.py +0 -1
  652. transformers/models/idefics/configuration_idefics.py +0 -1
  653. transformers/models/idefics/image_processing_idefics.py +13 -15
  654. transformers/models/idefics/modeling_idefics.py +70 -61
  655. transformers/models/idefics/perceiver.py +1 -3
  656. transformers/models/idefics/processing_idefics.py +32 -48
  657. transformers/models/idefics/vision.py +22 -24
  658. transformers/models/idefics2/configuration_idefics2.py +0 -1
  659. transformers/models/idefics2/image_processing_idefics2.py +31 -32
  660. transformers/models/idefics2/image_processing_idefics2_fast.py +7 -8
  661. transformers/models/idefics2/modeling_idefics2.py +63 -59
  662. transformers/models/idefics2/processing_idefics2.py +10 -68
  663. transformers/models/idefics3/configuration_idefics3.py +0 -1
  664. transformers/models/idefics3/image_processing_idefics3.py +42 -43
  665. transformers/models/idefics3/image_processing_idefics3_fast.py +11 -12
  666. transformers/models/idefics3/modeling_idefics3.py +57 -55
  667. transformers/models/idefics3/processing_idefics3.py +15 -69
  668. transformers/models/ijepa/configuration_ijepa.py +0 -1
  669. transformers/models/ijepa/modeling_ijepa.py +10 -11
  670. transformers/models/ijepa/modular_ijepa.py +5 -7
  671. transformers/models/imagegpt/configuration_imagegpt.py +0 -1
  672. transformers/models/imagegpt/image_processing_imagegpt.py +17 -18
  673. transformers/models/imagegpt/image_processing_imagegpt_fast.py +9 -14
  674. transformers/models/imagegpt/modeling_imagegpt.py +66 -60
  675. transformers/models/informer/configuration_informer.py +6 -9
  676. transformers/models/informer/modeling_informer.py +84 -86
  677. transformers/models/informer/modular_informer.py +13 -16
  678. transformers/models/instructblip/configuration_instructblip.py +0 -1
  679. transformers/models/instructblip/modeling_instructblip.py +45 -44
  680. transformers/models/instructblip/processing_instructblip.py +10 -36
  681. transformers/models/instructblipvideo/configuration_instructblipvideo.py +0 -1
  682. transformers/models/instructblipvideo/modeling_instructblipvideo.py +107 -105
  683. transformers/models/instructblipvideo/modular_instructblipvideo.py +34 -36
  684. transformers/models/instructblipvideo/processing_instructblipvideo.py +14 -33
  685. transformers/models/instructblipvideo/video_processing_instructblipvideo.py +4 -6
  686. transformers/models/internvl/configuration_internvl.py +0 -1
  687. transformers/models/internvl/modeling_internvl.py +52 -51
  688. transformers/models/internvl/modular_internvl.py +24 -30
  689. transformers/models/internvl/processing_internvl.py +12 -45
  690. transformers/models/internvl/video_processing_internvl.py +8 -10
  691. transformers/models/jais2/__init__.py +27 -0
  692. transformers/models/jais2/configuration_jais2.py +150 -0
  693. transformers/models/jais2/modeling_jais2.py +484 -0
  694. transformers/models/jais2/modular_jais2.py +194 -0
  695. transformers/models/jamba/configuration_jamba.py +0 -1
  696. transformers/models/jamba/modeling_jamba.py +67 -65
  697. transformers/models/jamba/modular_jamba.py +54 -55
  698. transformers/models/janus/configuration_janus.py +0 -1
  699. transformers/models/janus/image_processing_janus.py +35 -37
  700. transformers/models/janus/image_processing_janus_fast.py +12 -14
  701. transformers/models/janus/modeling_janus.py +56 -50
  702. transformers/models/janus/modular_janus.py +76 -70
  703. transformers/models/janus/processing_janus.py +17 -43
  704. transformers/models/jetmoe/configuration_jetmoe.py +20 -23
  705. transformers/models/jetmoe/modeling_jetmoe.py +41 -44
  706. transformers/models/jetmoe/modular_jetmoe.py +31 -33
  707. transformers/models/kosmos2/configuration_kosmos2.py +0 -1
  708. transformers/models/kosmos2/modeling_kosmos2.py +159 -148
  709. transformers/models/kosmos2/processing_kosmos2.py +40 -55
  710. transformers/models/kosmos2_5/__init__.py +0 -1
  711. transformers/models/kosmos2_5/configuration_kosmos2_5.py +0 -1
  712. transformers/models/kosmos2_5/image_processing_kosmos2_5.py +10 -12
  713. transformers/models/kosmos2_5/image_processing_kosmos2_5_fast.py +4 -13
  714. transformers/models/kosmos2_5/modeling_kosmos2_5.py +118 -110
  715. transformers/models/kosmos2_5/processing_kosmos2_5.py +8 -29
  716. transformers/models/kyutai_speech_to_text/configuration_kyutai_speech_to_text.py +23 -25
  717. transformers/models/kyutai_speech_to_text/feature_extraction_kyutai_speech_to_text.py +12 -14
  718. transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +67 -68
  719. transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py +28 -22
  720. transformers/models/kyutai_speech_to_text/processing_kyutai_speech_to_text.py +2 -8
  721. transformers/models/lasr/configuration_lasr.py +5 -3
  722. transformers/models/lasr/feature_extraction_lasr.py +10 -12
  723. transformers/models/lasr/modeling_lasr.py +21 -23
  724. transformers/models/lasr/modular_lasr.py +16 -11
  725. transformers/models/lasr/processing_lasr.py +12 -8
  726. transformers/models/lasr/tokenization_lasr.py +2 -4
  727. transformers/models/layoutlm/configuration_layoutlm.py +0 -1
  728. transformers/models/layoutlm/modeling_layoutlm.py +72 -72
  729. transformers/models/layoutlmv2/configuration_layoutlmv2.py +0 -1
  730. transformers/models/layoutlmv2/image_processing_layoutlmv2.py +18 -21
  731. transformers/models/layoutlmv2/image_processing_layoutlmv2_fast.py +5 -7
  732. transformers/models/layoutlmv2/modeling_layoutlmv2.py +60 -50
  733. transformers/models/layoutlmv2/processing_layoutlmv2.py +14 -44
  734. transformers/models/layoutlmv2/tokenization_layoutlmv2.py +64 -74
  735. transformers/models/layoutlmv3/configuration_layoutlmv3.py +0 -1
  736. transformers/models/layoutlmv3/image_processing_layoutlmv3.py +24 -26
  737. transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py +7 -9
  738. transformers/models/layoutlmv3/modeling_layoutlmv3.py +78 -56
  739. transformers/models/layoutlmv3/processing_layoutlmv3.py +14 -46
  740. transformers/models/layoutlmv3/tokenization_layoutlmv3.py +64 -75
  741. transformers/models/layoutxlm/configuration_layoutxlm.py +0 -1
  742. transformers/models/layoutxlm/modular_layoutxlm.py +0 -1
  743. transformers/models/layoutxlm/processing_layoutxlm.py +14 -44
  744. transformers/models/layoutxlm/tokenization_layoutxlm.py +65 -76
  745. transformers/models/led/configuration_led.py +1 -4
  746. transformers/models/led/modeling_led.py +119 -267
  747. transformers/models/levit/configuration_levit.py +0 -1
  748. transformers/models/levit/image_processing_levit.py +19 -21
  749. transformers/models/levit/image_processing_levit_fast.py +0 -1
  750. transformers/models/levit/modeling_levit.py +35 -19
  751. transformers/models/lfm2/configuration_lfm2.py +22 -23
  752. transformers/models/lfm2/modeling_lfm2.py +43 -45
  753. transformers/models/lfm2/modular_lfm2.py +29 -29
  754. transformers/models/lfm2_moe/__init__.py +0 -1
  755. transformers/models/lfm2_moe/configuration_lfm2_moe.py +1 -2
  756. transformers/models/lfm2_moe/modeling_lfm2_moe.py +58 -49
  757. transformers/models/lfm2_moe/modular_lfm2_moe.py +13 -37
  758. transformers/models/lfm2_vl/configuration_lfm2_vl.py +4 -1
  759. transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py +34 -5
  760. transformers/models/lfm2_vl/modeling_lfm2_vl.py +42 -38
  761. transformers/models/lfm2_vl/modular_lfm2_vl.py +28 -29
  762. transformers/models/lfm2_vl/processing_lfm2_vl.py +96 -76
  763. transformers/models/lightglue/image_processing_lightglue.py +16 -15
  764. transformers/models/lightglue/image_processing_lightglue_fast.py +5 -6
  765. transformers/models/lightglue/modeling_lightglue.py +28 -30
  766. transformers/models/lightglue/modular_lightglue.py +28 -28
  767. transformers/models/lighton_ocr/__init__.py +28 -0
  768. transformers/models/lighton_ocr/configuration_lighton_ocr.py +128 -0
  769. transformers/models/lighton_ocr/modeling_lighton_ocr.py +460 -0
  770. transformers/models/lighton_ocr/modular_lighton_ocr.py +403 -0
  771. transformers/models/lighton_ocr/processing_lighton_ocr.py +229 -0
  772. transformers/models/lilt/configuration_lilt.py +0 -1
  773. transformers/models/lilt/modeling_lilt.py +72 -70
  774. transformers/models/llama/configuration_llama.py +21 -24
  775. transformers/models/llama/modeling_llama.py +32 -35
  776. transformers/models/llama/tokenization_llama.py +2 -4
  777. transformers/models/llama4/configuration_llama4.py +20 -22
  778. transformers/models/llama4/image_processing_llama4_fast.py +9 -11
  779. transformers/models/llama4/modeling_llama4.py +78 -75
  780. transformers/models/llama4/processing_llama4.py +33 -57
  781. transformers/models/llava/configuration_llava.py +0 -1
  782. transformers/models/llava/image_processing_llava.py +25 -28
  783. transformers/models/llava/image_processing_llava_fast.py +6 -8
  784. transformers/models/llava/modeling_llava.py +47 -44
  785. transformers/models/llava/processing_llava.py +18 -51
  786. transformers/models/llava_next/configuration_llava_next.py +0 -1
  787. transformers/models/llava_next/image_processing_llava_next.py +43 -45
  788. transformers/models/llava_next/image_processing_llava_next_fast.py +5 -7
  789. transformers/models/llava_next/modeling_llava_next.py +49 -47
  790. transformers/models/llava_next/processing_llava_next.py +18 -47
  791. transformers/models/llava_next_video/configuration_llava_next_video.py +0 -1
  792. transformers/models/llava_next_video/modeling_llava_next_video.py +60 -58
  793. transformers/models/llava_next_video/modular_llava_next_video.py +51 -49
  794. transformers/models/llava_next_video/processing_llava_next_video.py +21 -63
  795. transformers/models/llava_next_video/video_processing_llava_next_video.py +0 -1
  796. transformers/models/llava_onevision/configuration_llava_onevision.py +0 -1
  797. transformers/models/llava_onevision/image_processing_llava_onevision.py +40 -42
  798. transformers/models/llava_onevision/image_processing_llava_onevision_fast.py +6 -8
  799. transformers/models/llava_onevision/modeling_llava_onevision.py +67 -65
  800. transformers/models/llava_onevision/modular_llava_onevision.py +58 -56
  801. transformers/models/llava_onevision/processing_llava_onevision.py +21 -53
  802. transformers/models/llava_onevision/video_processing_llava_onevision.py +0 -1
  803. transformers/models/longcat_flash/__init__.py +0 -1
  804. transformers/models/longcat_flash/configuration_longcat_flash.py +32 -35
  805. transformers/models/longcat_flash/modeling_longcat_flash.py +32 -32
  806. transformers/models/longcat_flash/modular_longcat_flash.py +18 -19
  807. transformers/models/longformer/configuration_longformer.py +1 -4
  808. transformers/models/longformer/modeling_longformer.py +99 -101
  809. transformers/models/longt5/configuration_longt5.py +0 -1
  810. transformers/models/longt5/modeling_longt5.py +43 -48
  811. transformers/models/luke/configuration_luke.py +0 -1
  812. transformers/models/luke/modeling_luke.py +179 -181
  813. transformers/models/luke/tokenization_luke.py +99 -105
  814. transformers/models/lw_detr/__init__.py +27 -0
  815. transformers/models/lw_detr/configuration_lw_detr.py +374 -0
  816. transformers/models/lw_detr/modeling_lw_detr.py +1698 -0
  817. transformers/models/lw_detr/modular_lw_detr.py +1611 -0
  818. transformers/models/lxmert/configuration_lxmert.py +0 -1
  819. transformers/models/lxmert/modeling_lxmert.py +63 -74
  820. transformers/models/m2m_100/configuration_m2m_100.py +0 -1
  821. transformers/models/m2m_100/modeling_m2m_100.py +79 -71
  822. transformers/models/m2m_100/tokenization_m2m_100.py +8 -8
  823. transformers/models/mamba/configuration_mamba.py +0 -1
  824. transformers/models/mamba/modeling_mamba.py +44 -44
  825. transformers/models/mamba2/configuration_mamba2.py +0 -1
  826. transformers/models/mamba2/modeling_mamba2.py +67 -68
  827. transformers/models/marian/configuration_marian.py +1 -2
  828. transformers/models/marian/modeling_marian.py +87 -86
  829. transformers/models/marian/tokenization_marian.py +6 -6
  830. transformers/models/markuplm/configuration_markuplm.py +0 -1
  831. transformers/models/markuplm/feature_extraction_markuplm.py +1 -2
  832. transformers/models/markuplm/modeling_markuplm.py +65 -70
  833. transformers/models/markuplm/processing_markuplm.py +31 -38
  834. transformers/models/markuplm/tokenization_markuplm.py +67 -77
  835. transformers/models/mask2former/configuration_mask2former.py +5 -8
  836. transformers/models/mask2former/image_processing_mask2former.py +84 -85
  837. transformers/models/mask2former/image_processing_mask2former_fast.py +30 -33
  838. transformers/models/mask2former/modeling_mask2former.py +99 -92
  839. transformers/models/mask2former/modular_mask2former.py +6 -8
  840. transformers/models/maskformer/configuration_maskformer.py +6 -9
  841. transformers/models/maskformer/configuration_maskformer_swin.py +0 -1
  842. transformers/models/maskformer/image_processing_maskformer.py +84 -85
  843. transformers/models/maskformer/image_processing_maskformer_fast.py +29 -33
  844. transformers/models/maskformer/modeling_maskformer.py +65 -59
  845. transformers/models/maskformer/modeling_maskformer_swin.py +34 -32
  846. transformers/models/mbart/configuration_mbart.py +1 -1
  847. transformers/models/mbart/modeling_mbart.py +118 -113
  848. transformers/models/mbart/tokenization_mbart.py +2 -4
  849. transformers/models/mbart50/tokenization_mbart50.py +3 -5
  850. transformers/models/megatron_bert/configuration_megatron_bert.py +0 -1
  851. transformers/models/megatron_bert/modeling_megatron_bert.py +141 -150
  852. transformers/models/metaclip_2/modeling_metaclip_2.py +48 -46
  853. transformers/models/metaclip_2/modular_metaclip_2.py +21 -21
  854. transformers/models/mgp_str/configuration_mgp_str.py +0 -1
  855. transformers/models/mgp_str/modeling_mgp_str.py +14 -16
  856. transformers/models/mgp_str/processing_mgp_str.py +3 -20
  857. transformers/models/mgp_str/tokenization_mgp_str.py +1 -3
  858. transformers/models/mimi/configuration_mimi.py +38 -40
  859. transformers/models/mimi/modeling_mimi.py +100 -82
  860. transformers/models/minimax/__init__.py +0 -1
  861. transformers/models/minimax/configuration_minimax.py +32 -36
  862. transformers/models/minimax/modeling_minimax.py +57 -47
  863. transformers/models/minimax/modular_minimax.py +62 -54
  864. transformers/models/minimax_m2/__init__.py +28 -0
  865. transformers/models/minimax_m2/configuration_minimax_m2.py +211 -0
  866. transformers/models/minimax_m2/modeling_minimax_m2.py +704 -0
  867. transformers/models/minimax_m2/modular_minimax_m2.py +369 -0
  868. transformers/models/ministral/configuration_ministral.py +20 -22
  869. transformers/models/ministral/modeling_ministral.py +32 -34
  870. transformers/models/ministral/modular_ministral.py +27 -29
  871. transformers/models/ministral3/configuration_ministral3.py +19 -22
  872. transformers/models/ministral3/modeling_ministral3.py +32 -34
  873. transformers/models/ministral3/modular_ministral3.py +4 -5
  874. transformers/models/mistral/configuration_mistral.py +19 -22
  875. transformers/models/mistral/modeling_mistral.py +32 -34
  876. transformers/models/mistral/modular_mistral.py +11 -12
  877. transformers/models/mistral3/configuration_mistral3.py +0 -1
  878. transformers/models/mistral3/modeling_mistral3.py +53 -46
  879. transformers/models/mistral3/modular_mistral3.py +38 -36
  880. transformers/models/mixtral/configuration_mixtral.py +24 -27
  881. transformers/models/mixtral/modeling_mixtral.py +47 -42
  882. transformers/models/mixtral/modular_mixtral.py +32 -31
  883. transformers/models/mlcd/configuration_mlcd.py +0 -1
  884. transformers/models/mlcd/modeling_mlcd.py +16 -12
  885. transformers/models/mlcd/modular_mlcd.py +13 -11
  886. transformers/models/mllama/configuration_mllama.py +5 -8
  887. transformers/models/mllama/image_processing_mllama.py +23 -25
  888. transformers/models/mllama/image_processing_mllama_fast.py +5 -6
  889. transformers/models/mllama/modeling_mllama.py +94 -86
  890. transformers/models/mllama/processing_mllama.py +6 -55
  891. transformers/models/mluke/tokenization_mluke.py +97 -103
  892. transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py +1 -3
  893. transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +95 -97
  894. transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py +1 -3
  895. transformers/models/mobilebert/configuration_mobilebert.py +0 -1
  896. transformers/models/mobilebert/modeling_mobilebert.py +77 -85
  897. transformers/models/mobilebert/tokenization_mobilebert.py +0 -1
  898. transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +0 -1
  899. transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py +20 -23
  900. transformers/models/mobilenet_v1/image_processing_mobilenet_v1_fast.py +0 -1
  901. transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +13 -16
  902. transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +0 -1
  903. transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +48 -51
  904. transformers/models/mobilenet_v2/image_processing_mobilenet_v2_fast.py +10 -12
  905. transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +17 -20
  906. transformers/models/mobilevit/configuration_mobilevit.py +0 -1
  907. transformers/models/mobilevit/image_processing_mobilevit.py +46 -49
  908. transformers/models/mobilevit/image_processing_mobilevit_fast.py +9 -11
  909. transformers/models/mobilevit/modeling_mobilevit.py +21 -19
  910. transformers/models/mobilevitv2/configuration_mobilevitv2.py +0 -1
  911. transformers/models/mobilevitv2/modeling_mobilevitv2.py +21 -20
  912. transformers/models/modernbert/configuration_modernbert.py +34 -34
  913. transformers/models/modernbert/modeling_modernbert.py +135 -126
  914. transformers/models/modernbert/modular_modernbert.py +167 -156
  915. transformers/models/modernbert_decoder/configuration_modernbert_decoder.py +30 -32
  916. transformers/models/modernbert_decoder/modeling_modernbert_decoder.py +54 -48
  917. transformers/models/modernbert_decoder/modular_modernbert_decoder.py +78 -71
  918. transformers/models/moonshine/configuration_moonshine.py +22 -24
  919. transformers/models/moonshine/modeling_moonshine.py +64 -66
  920. transformers/models/moonshine/modular_moonshine.py +72 -73
  921. transformers/models/moshi/configuration_moshi.py +18 -21
  922. transformers/models/moshi/modeling_moshi.py +150 -183
  923. transformers/models/mpnet/configuration_mpnet.py +0 -1
  924. transformers/models/mpnet/modeling_mpnet.py +57 -57
  925. transformers/models/mpnet/tokenization_mpnet.py +1 -4
  926. transformers/models/mpt/configuration_mpt.py +1 -9
  927. transformers/models/mpt/modeling_mpt.py +58 -60
  928. transformers/models/mra/configuration_mra.py +0 -1
  929. transformers/models/mra/modeling_mra.py +58 -57
  930. transformers/models/mt5/configuration_mt5.py +2 -4
  931. transformers/models/mt5/modeling_mt5.py +75 -87
  932. transformers/models/musicgen/configuration_musicgen.py +0 -1
  933. transformers/models/musicgen/modeling_musicgen.py +113 -120
  934. transformers/models/musicgen/processing_musicgen.py +3 -21
  935. transformers/models/musicgen_melody/configuration_musicgen_melody.py +0 -1
  936. transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py +8 -9
  937. transformers/models/musicgen_melody/modeling_musicgen_melody.py +110 -109
  938. transformers/models/musicgen_melody/processing_musicgen_melody.py +3 -22
  939. transformers/models/mvp/configuration_mvp.py +0 -1
  940. transformers/models/mvp/modeling_mvp.py +122 -119
  941. transformers/models/myt5/tokenization_myt5.py +8 -10
  942. transformers/models/nanochat/configuration_nanochat.py +0 -1
  943. transformers/models/nanochat/modeling_nanochat.py +33 -36
  944. transformers/models/nanochat/modular_nanochat.py +12 -14
  945. transformers/models/nemotron/configuration_nemotron.py +20 -23
  946. transformers/models/nemotron/modeling_nemotron.py +51 -54
  947. transformers/models/nllb/tokenization_nllb.py +7 -9
  948. transformers/models/nllb_moe/configuration_nllb_moe.py +1 -1
  949. transformers/models/nllb_moe/modeling_nllb_moe.py +77 -69
  950. transformers/models/nougat/image_processing_nougat.py +29 -32
  951. transformers/models/nougat/image_processing_nougat_fast.py +4 -6
  952. transformers/models/nougat/processing_nougat.py +37 -39
  953. transformers/models/nougat/tokenization_nougat.py +16 -23
  954. transformers/models/nystromformer/configuration_nystromformer.py +0 -1
  955. transformers/models/nystromformer/modeling_nystromformer.py +68 -63
  956. transformers/models/olmo/configuration_olmo.py +18 -21
  957. transformers/models/olmo/modeling_olmo.py +32 -35
  958. transformers/models/olmo/modular_olmo.py +5 -9
  959. transformers/models/olmo2/configuration_olmo2.py +18 -21
  960. transformers/models/olmo2/modeling_olmo2.py +33 -36
  961. transformers/models/olmo2/modular_olmo2.py +29 -31
  962. transformers/models/olmo3/__init__.py +0 -1
  963. transformers/models/olmo3/configuration_olmo3.py +20 -23
  964. transformers/models/olmo3/modeling_olmo3.py +32 -35
  965. transformers/models/olmo3/modular_olmo3.py +31 -33
  966. transformers/models/olmoe/configuration_olmoe.py +24 -26
  967. transformers/models/olmoe/modeling_olmoe.py +49 -43
  968. transformers/models/olmoe/modular_olmoe.py +16 -15
  969. transformers/models/omdet_turbo/configuration_omdet_turbo.py +2 -3
  970. transformers/models/omdet_turbo/modeling_omdet_turbo.py +42 -40
  971. transformers/models/omdet_turbo/processing_omdet_turbo.py +19 -67
  972. transformers/models/oneformer/configuration_oneformer.py +5 -8
  973. transformers/models/oneformer/image_processing_oneformer.py +83 -84
  974. transformers/models/oneformer/image_processing_oneformer_fast.py +33 -34
  975. transformers/models/oneformer/modeling_oneformer.py +130 -162
  976. transformers/models/oneformer/processing_oneformer.py +28 -43
  977. transformers/models/openai/configuration_openai.py +0 -1
  978. transformers/models/openai/modeling_openai.py +62 -51
  979. transformers/models/openai/tokenization_openai.py +2 -5
  980. transformers/models/opt/configuration_opt.py +0 -1
  981. transformers/models/opt/modeling_opt.py +74 -75
  982. transformers/models/ovis2/__init__.py +0 -1
  983. transformers/models/ovis2/configuration_ovis2.py +0 -1
  984. transformers/models/ovis2/image_processing_ovis2.py +22 -24
  985. transformers/models/ovis2/image_processing_ovis2_fast.py +6 -8
  986. transformers/models/ovis2/modeling_ovis2.py +58 -48
  987. transformers/models/ovis2/modular_ovis2.py +38 -32
  988. transformers/models/ovis2/processing_ovis2.py +12 -40
  989. transformers/models/owlv2/configuration_owlv2.py +0 -1
  990. transformers/models/owlv2/image_processing_owlv2.py +20 -21
  991. transformers/models/owlv2/image_processing_owlv2_fast.py +7 -10
  992. transformers/models/owlv2/modeling_owlv2.py +89 -90
  993. transformers/models/owlv2/modular_owlv2.py +6 -9
  994. transformers/models/owlv2/processing_owlv2.py +20 -49
  995. transformers/models/owlvit/configuration_owlvit.py +0 -1
  996. transformers/models/owlvit/image_processing_owlvit.py +21 -22
  997. transformers/models/owlvit/image_processing_owlvit_fast.py +2 -3
  998. transformers/models/owlvit/modeling_owlvit.py +88 -89
  999. transformers/models/owlvit/processing_owlvit.py +20 -48
  1000. transformers/models/paddleocr_vl/__init__.py +0 -1
  1001. transformers/models/paddleocr_vl/configuration_paddleocr_vl.py +19 -19
  1002. transformers/models/paddleocr_vl/image_processing_paddleocr_vl.py +37 -37
  1003. transformers/models/paddleocr_vl/image_processing_paddleocr_vl_fast.py +12 -12
  1004. transformers/models/paddleocr_vl/modeling_paddleocr_vl.py +104 -90
  1005. transformers/models/paddleocr_vl/modular_paddleocr_vl.py +90 -80
  1006. transformers/models/paddleocr_vl/processing_paddleocr_vl.py +1 -3
  1007. transformers/models/paligemma/configuration_paligemma.py +0 -1
  1008. transformers/models/paligemma/modeling_paligemma.py +73 -67
  1009. transformers/models/paligemma/processing_paligemma.py +13 -66
  1010. transformers/models/parakeet/configuration_parakeet.py +1 -4
  1011. transformers/models/parakeet/feature_extraction_parakeet.py +10 -12
  1012. transformers/models/parakeet/modeling_parakeet.py +23 -22
  1013. transformers/models/parakeet/modular_parakeet.py +21 -18
  1014. transformers/models/parakeet/processing_parakeet.py +12 -5
  1015. transformers/models/parakeet/{tokenization_parakeet_fast.py → tokenization_parakeet.py} +5 -7
  1016. transformers/models/patchtsmixer/configuration_patchtsmixer.py +5 -8
  1017. transformers/models/patchtsmixer/modeling_patchtsmixer.py +64 -62
  1018. transformers/models/patchtst/configuration_patchtst.py +6 -9
  1019. transformers/models/patchtst/modeling_patchtst.py +77 -78
  1020. transformers/models/pe_audio/__init__.py +29 -0
  1021. transformers/models/pe_audio/configuration_pe_audio.py +204 -0
  1022. transformers/models/pe_audio/feature_extraction_pe_audio.py +160 -0
  1023. transformers/models/pe_audio/modeling_pe_audio.py +819 -0
  1024. transformers/models/pe_audio/modular_pe_audio.py +298 -0
  1025. transformers/models/pe_audio/processing_pe_audio.py +23 -0
  1026. transformers/models/pe_audio_video/__init__.py +28 -0
  1027. transformers/models/pe_audio_video/configuration_pe_audio_video.py +223 -0
  1028. transformers/models/pe_audio_video/modeling_pe_audio_video.py +971 -0
  1029. transformers/models/pe_audio_video/modular_pe_audio_video.py +763 -0
  1030. transformers/models/pe_audio_video/processing_pe_audio_video.py +24 -0
  1031. transformers/models/pe_video/__init__.py +29 -0
  1032. transformers/models/pe_video/configuration_pe_video.py +209 -0
  1033. transformers/models/pe_video/modeling_pe_video.py +635 -0
  1034. transformers/models/pe_video/modular_pe_video.py +218 -0
  1035. transformers/models/pe_video/processing_pe_video.py +10 -0
  1036. transformers/models/pe_video/video_processing_pe_video.py +64 -0
  1037. transformers/models/pegasus/configuration_pegasus.py +1 -1
  1038. transformers/models/pegasus/modeling_pegasus.py +66 -65
  1039. transformers/models/pegasus/tokenization_pegasus.py +1 -4
  1040. transformers/models/pegasus_x/configuration_pegasus_x.py +0 -1
  1041. transformers/models/pegasus_x/modeling_pegasus_x.py +51 -52
  1042. transformers/models/perceiver/configuration_perceiver.py +0 -1
  1043. transformers/models/perceiver/image_processing_perceiver.py +22 -25
  1044. transformers/models/perceiver/image_processing_perceiver_fast.py +5 -7
  1045. transformers/models/perceiver/modeling_perceiver.py +140 -137
  1046. transformers/models/perceiver/tokenization_perceiver.py +3 -6
  1047. transformers/models/perception_lm/configuration_perception_lm.py +0 -1
  1048. transformers/models/perception_lm/image_processing_perception_lm_fast.py +8 -10
  1049. transformers/models/perception_lm/modeling_perception_lm.py +45 -43
  1050. transformers/models/perception_lm/modular_perception_lm.py +38 -36
  1051. transformers/models/perception_lm/processing_perception_lm.py +13 -47
  1052. transformers/models/perception_lm/video_processing_perception_lm.py +0 -1
  1053. transformers/models/persimmon/configuration_persimmon.py +18 -21
  1054. transformers/models/persimmon/modeling_persimmon.py +40 -43
  1055. transformers/models/phi/configuration_phi.py +19 -22
  1056. transformers/models/phi/modeling_phi.py +36 -38
  1057. transformers/models/phi/modular_phi.py +23 -23
  1058. transformers/models/phi3/configuration_phi3.py +23 -26
  1059. transformers/models/phi3/modeling_phi3.py +34 -37
  1060. transformers/models/phi3/modular_phi3.py +13 -17
  1061. transformers/models/phi4_multimodal/configuration_phi4_multimodal.py +25 -26
  1062. transformers/models/phi4_multimodal/feature_extraction_phi4_multimodal.py +7 -9
  1063. transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py +7 -7
  1064. transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +58 -57
  1065. transformers/models/phi4_multimodal/modular_phi4_multimodal.py +62 -60
  1066. transformers/models/phi4_multimodal/processing_phi4_multimodal.py +7 -44
  1067. transformers/models/phimoe/configuration_phimoe.py +26 -29
  1068. transformers/models/phimoe/modeling_phimoe.py +47 -42
  1069. transformers/models/phimoe/modular_phimoe.py +1 -2
  1070. transformers/models/phobert/tokenization_phobert.py +4 -6
  1071. transformers/models/pix2struct/configuration_pix2struct.py +0 -1
  1072. transformers/models/pix2struct/image_processing_pix2struct.py +15 -19
  1073. transformers/models/pix2struct/image_processing_pix2struct_fast.py +7 -10
  1074. transformers/models/pix2struct/modeling_pix2struct.py +42 -45
  1075. transformers/models/pix2struct/processing_pix2struct.py +5 -30
  1076. transformers/models/pixio/__init__.py +29 -0
  1077. transformers/models/pixio/configuration_pixio.py +150 -0
  1078. transformers/models/pixio/modeling_pixio.py +505 -0
  1079. transformers/models/pixio/modular_pixio.py +401 -0
  1080. transformers/models/pixtral/configuration_pixtral.py +11 -14
  1081. transformers/models/pixtral/image_processing_pixtral.py +26 -28
  1082. transformers/models/pixtral/image_processing_pixtral_fast.py +5 -6
  1083. transformers/models/pixtral/modeling_pixtral.py +23 -26
  1084. transformers/models/pixtral/processing_pixtral.py +21 -53
  1085. transformers/models/plbart/configuration_plbart.py +1 -1
  1086. transformers/models/plbart/modeling_plbart.py +107 -102
  1087. transformers/models/plbart/modular_plbart.py +36 -32
  1088. transformers/models/plbart/tokenization_plbart.py +4 -5
  1089. transformers/models/poolformer/configuration_poolformer.py +0 -1
  1090. transformers/models/poolformer/image_processing_poolformer.py +21 -24
  1091. transformers/models/poolformer/image_processing_poolformer_fast.py +6 -8
  1092. transformers/models/poolformer/modeling_poolformer.py +21 -13
  1093. transformers/models/pop2piano/configuration_pop2piano.py +0 -2
  1094. transformers/models/pop2piano/feature_extraction_pop2piano.py +6 -9
  1095. transformers/models/pop2piano/modeling_pop2piano.py +22 -23
  1096. transformers/models/pop2piano/processing_pop2piano.py +25 -33
  1097. transformers/models/pop2piano/tokenization_pop2piano.py +15 -23
  1098. transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py +3 -3
  1099. transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py +28 -28
  1100. transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything_fast.py +14 -15
  1101. transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py +9 -10
  1102. transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py +9 -10
  1103. transformers/models/prophetnet/configuration_prophetnet.py +26 -28
  1104. transformers/models/prophetnet/modeling_prophetnet.py +111 -131
  1105. transformers/models/prophetnet/tokenization_prophetnet.py +14 -16
  1106. transformers/models/pvt/configuration_pvt.py +0 -1
  1107. transformers/models/pvt/image_processing_pvt.py +17 -20
  1108. transformers/models/pvt/image_processing_pvt_fast.py +0 -1
  1109. transformers/models/pvt/modeling_pvt.py +19 -21
  1110. transformers/models/pvt_v2/configuration_pvt_v2.py +2 -4
  1111. transformers/models/pvt_v2/modeling_pvt_v2.py +21 -23
  1112. transformers/models/qwen2/configuration_qwen2.py +18 -21
  1113. transformers/models/qwen2/modeling_qwen2.py +32 -34
  1114. transformers/models/qwen2/modular_qwen2.py +11 -12
  1115. transformers/models/qwen2/tokenization_qwen2.py +2 -5
  1116. transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py +20 -23
  1117. transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +239 -192
  1118. transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +174 -127
  1119. transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py +41 -49
  1120. transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +22 -25
  1121. transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +112 -101
  1122. transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py +72 -107
  1123. transformers/models/qwen2_5_vl/processing_qwen2_5_vl.py +7 -43
  1124. transformers/models/qwen2_audio/configuration_qwen2_audio.py +0 -1
  1125. transformers/models/qwen2_audio/modeling_qwen2_audio.py +29 -31
  1126. transformers/models/qwen2_audio/processing_qwen2_audio.py +13 -42
  1127. transformers/models/qwen2_moe/configuration_qwen2_moe.py +28 -31
  1128. transformers/models/qwen2_moe/modeling_qwen2_moe.py +48 -43
  1129. transformers/models/qwen2_moe/modular_qwen2_moe.py +7 -10
  1130. transformers/models/qwen2_vl/configuration_qwen2_vl.py +22 -24
  1131. transformers/models/qwen2_vl/image_processing_qwen2_vl.py +41 -42
  1132. transformers/models/qwen2_vl/image_processing_qwen2_vl_fast.py +8 -9
  1133. transformers/models/qwen2_vl/modeling_qwen2_vl.py +108 -96
  1134. transformers/models/qwen2_vl/processing_qwen2_vl.py +7 -44
  1135. transformers/models/qwen2_vl/video_processing_qwen2_vl.py +35 -13
  1136. transformers/models/qwen3/configuration_qwen3.py +20 -23
  1137. transformers/models/qwen3/modeling_qwen3.py +32 -35
  1138. transformers/models/qwen3/modular_qwen3.py +4 -6
  1139. transformers/models/qwen3_moe/configuration_qwen3_moe.py +25 -28
  1140. transformers/models/qwen3_moe/modeling_qwen3_moe.py +48 -43
  1141. transformers/models/qwen3_moe/modular_qwen3_moe.py +10 -13
  1142. transformers/models/qwen3_next/configuration_qwen3_next.py +31 -34
  1143. transformers/models/qwen3_next/modeling_qwen3_next.py +43 -48
  1144. transformers/models/qwen3_next/modular_qwen3_next.py +33 -34
  1145. transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py +89 -88
  1146. transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +199 -156
  1147. transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +170 -152
  1148. transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py +40 -48
  1149. transformers/models/qwen3_vl/configuration_qwen3_vl.py +21 -24
  1150. transformers/models/qwen3_vl/modeling_qwen3_vl.py +91 -81
  1151. transformers/models/qwen3_vl/modular_qwen3_vl.py +86 -112
  1152. transformers/models/qwen3_vl/processing_qwen3_vl.py +6 -42
  1153. transformers/models/qwen3_vl/video_processing_qwen3_vl.py +10 -12
  1154. transformers/models/qwen3_vl_moe/configuration_qwen3_vl_moe.py +21 -25
  1155. transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py +174 -195
  1156. transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py +65 -117
  1157. transformers/models/rag/configuration_rag.py +0 -9
  1158. transformers/models/rag/modeling_rag.py +123 -127
  1159. transformers/models/rag/retrieval_rag.py +2 -4
  1160. transformers/models/rag/tokenization_rag.py +0 -50
  1161. transformers/models/recurrent_gemma/configuration_recurrent_gemma.py +21 -24
  1162. transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +34 -36
  1163. transformers/models/reformer/configuration_reformer.py +0 -1
  1164. transformers/models/reformer/modeling_reformer.py +76 -69
  1165. transformers/models/reformer/tokenization_reformer.py +3 -6
  1166. transformers/models/regnet/configuration_regnet.py +0 -1
  1167. transformers/models/regnet/modeling_regnet.py +11 -9
  1168. transformers/models/rembert/configuration_rembert.py +0 -1
  1169. transformers/models/rembert/modeling_rembert.py +115 -111
  1170. transformers/models/rembert/tokenization_rembert.py +1 -4
  1171. transformers/models/resnet/configuration_resnet.py +0 -1
  1172. transformers/models/resnet/modeling_resnet.py +16 -13
  1173. transformers/models/roberta/configuration_roberta.py +0 -1
  1174. transformers/models/roberta/modeling_roberta.py +94 -93
  1175. transformers/models/roberta/modular_roberta.py +58 -58
  1176. transformers/models/roberta/tokenization_roberta.py +2 -5
  1177. transformers/models/roberta/tokenization_roberta_old.py +2 -4
  1178. transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +0 -1
  1179. transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +94 -93
  1180. transformers/models/roc_bert/configuration_roc_bert.py +0 -1
  1181. transformers/models/roc_bert/modeling_roc_bert.py +122 -121
  1182. transformers/models/roc_bert/tokenization_roc_bert.py +88 -94
  1183. transformers/models/roformer/configuration_roformer.py +0 -1
  1184. transformers/models/roformer/modeling_roformer.py +79 -81
  1185. transformers/models/roformer/tokenization_roformer.py +3 -6
  1186. transformers/models/roformer/tokenization_utils.py +0 -1
  1187. transformers/models/rt_detr/configuration_rt_detr.py +1 -2
  1188. transformers/models/rt_detr/configuration_rt_detr_resnet.py +0 -1
  1189. transformers/models/rt_detr/image_processing_rt_detr.py +54 -55
  1190. transformers/models/rt_detr/image_processing_rt_detr_fast.py +15 -15
  1191. transformers/models/rt_detr/modeling_rt_detr.py +84 -82
  1192. transformers/models/rt_detr/modeling_rt_detr_resnet.py +10 -7
  1193. transformers/models/rt_detr/modular_rt_detr.py +14 -14
  1194. transformers/models/rt_detr_v2/configuration_rt_detr_v2.py +2 -4
  1195. transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +86 -81
  1196. transformers/models/rt_detr_v2/modular_rt_detr_v2.py +10 -7
  1197. transformers/models/rwkv/configuration_rwkv.py +0 -1
  1198. transformers/models/rwkv/modeling_rwkv.py +30 -32
  1199. transformers/models/sam/configuration_sam.py +1 -1
  1200. transformers/models/sam/image_processing_sam.py +59 -60
  1201. transformers/models/sam/image_processing_sam_fast.py +21 -23
  1202. transformers/models/sam/modeling_sam.py +37 -36
  1203. transformers/models/sam/processing_sam.py +39 -27
  1204. transformers/models/sam2/configuration_sam2.py +1 -2
  1205. transformers/models/sam2/image_processing_sam2_fast.py +14 -15
  1206. transformers/models/sam2/modeling_sam2.py +50 -48
  1207. transformers/models/sam2/modular_sam2.py +48 -45
  1208. transformers/models/sam2/processing_sam2.py +31 -47
  1209. transformers/models/sam2_video/configuration_sam2_video.py +0 -1
  1210. transformers/models/sam2_video/modeling_sam2_video.py +119 -112
  1211. transformers/models/sam2_video/modular_sam2_video.py +91 -97
  1212. transformers/models/sam2_video/processing_sam2_video.py +49 -66
  1213. transformers/models/sam2_video/video_processing_sam2_video.py +1 -4
  1214. transformers/models/sam3/configuration_sam3.py +21 -2
  1215. transformers/models/sam3/image_processing_sam3_fast.py +17 -20
  1216. transformers/models/sam3/modeling_sam3.py +77 -56
  1217. transformers/models/sam3/modular_sam3.py +3 -8
  1218. transformers/models/sam3/processing_sam3.py +29 -48
  1219. transformers/models/sam3_tracker/__init__.py +0 -1
  1220. transformers/models/sam3_tracker/configuration_sam3_tracker.py +0 -1
  1221. transformers/models/sam3_tracker/modeling_sam3_tracker.py +36 -36
  1222. transformers/models/sam3_tracker/modular_sam3_tracker.py +2 -1
  1223. transformers/models/sam3_tracker/processing_sam3_tracker.py +31 -47
  1224. transformers/models/sam3_tracker_video/__init__.py +0 -1
  1225. transformers/models/sam3_tracker_video/configuration_sam3_tracker_video.py +25 -1
  1226. transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +96 -85
  1227. transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py +27 -6
  1228. transformers/models/sam3_tracker_video/processing_sam3_tracker_video.py +50 -66
  1229. transformers/models/sam3_video/configuration_sam3_video.py +14 -1
  1230. transformers/models/sam3_video/modeling_sam3_video.py +32 -34
  1231. transformers/models/sam3_video/processing_sam3_video.py +26 -46
  1232. transformers/models/sam_hq/__init__.py +1 -1
  1233. transformers/models/sam_hq/configuration_sam_hq.py +1 -1
  1234. transformers/models/sam_hq/modeling_sam_hq.py +65 -64
  1235. transformers/models/sam_hq/modular_sam_hq.py +17 -19
  1236. transformers/models/sam_hq/{processing_samhq.py → processing_sam_hq.py} +39 -28
  1237. transformers/models/seamless_m4t/configuration_seamless_m4t.py +0 -1
  1238. transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py +8 -11
  1239. transformers/models/seamless_m4t/modeling_seamless_m4t.py +207 -193
  1240. transformers/models/seamless_m4t/processing_seamless_m4t.py +18 -39
  1241. transformers/models/seamless_m4t/tokenization_seamless_m4t.py +15 -20
  1242. transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py +0 -1
  1243. transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +199 -195
  1244. transformers/models/seed_oss/configuration_seed_oss.py +23 -25
  1245. transformers/models/seed_oss/modeling_seed_oss.py +31 -33
  1246. transformers/models/seed_oss/modular_seed_oss.py +3 -4
  1247. transformers/models/segformer/configuration_segformer.py +0 -10
  1248. transformers/models/segformer/image_processing_segformer.py +39 -42
  1249. transformers/models/segformer/image_processing_segformer_fast.py +7 -9
  1250. transformers/models/segformer/modeling_segformer.py +26 -28
  1251. transformers/models/segformer/modular_segformer.py +5 -7
  1252. transformers/models/seggpt/configuration_seggpt.py +0 -1
  1253. transformers/models/seggpt/image_processing_seggpt.py +38 -41
  1254. transformers/models/seggpt/modeling_seggpt.py +28 -30
  1255. transformers/models/sew/configuration_sew.py +0 -1
  1256. transformers/models/sew/modeling_sew.py +33 -35
  1257. transformers/models/sew/modular_sew.py +10 -12
  1258. transformers/models/sew_d/configuration_sew_d.py +0 -1
  1259. transformers/models/sew_d/modeling_sew_d.py +28 -30
  1260. transformers/models/shieldgemma2/configuration_shieldgemma2.py +0 -1
  1261. transformers/models/shieldgemma2/modeling_shieldgemma2.py +16 -17
  1262. transformers/models/shieldgemma2/processing_shieldgemma2.py +3 -5
  1263. transformers/models/siglip/configuration_siglip.py +0 -1
  1264. transformers/models/siglip/image_processing_siglip.py +17 -20
  1265. transformers/models/siglip/image_processing_siglip_fast.py +0 -1
  1266. transformers/models/siglip/modeling_siglip.py +62 -41
  1267. transformers/models/siglip/processing_siglip.py +2 -14
  1268. transformers/models/siglip/tokenization_siglip.py +6 -7
  1269. transformers/models/siglip2/configuration_siglip2.py +1 -1
  1270. transformers/models/siglip2/image_processing_siglip2.py +15 -16
  1271. transformers/models/siglip2/image_processing_siglip2_fast.py +4 -5
  1272. transformers/models/siglip2/modeling_siglip2.py +114 -92
  1273. transformers/models/siglip2/modular_siglip2.py +23 -25
  1274. transformers/models/siglip2/processing_siglip2.py +2 -14
  1275. transformers/models/smollm3/configuration_smollm3.py +23 -26
  1276. transformers/models/smollm3/modeling_smollm3.py +32 -35
  1277. transformers/models/smollm3/modular_smollm3.py +27 -29
  1278. transformers/models/smolvlm/configuration_smolvlm.py +1 -1
  1279. transformers/models/smolvlm/image_processing_smolvlm.py +42 -43
  1280. transformers/models/smolvlm/image_processing_smolvlm_fast.py +12 -12
  1281. transformers/models/smolvlm/modeling_smolvlm.py +56 -53
  1282. transformers/models/smolvlm/modular_smolvlm.py +15 -17
  1283. transformers/models/smolvlm/processing_smolvlm.py +15 -76
  1284. transformers/models/smolvlm/video_processing_smolvlm.py +7 -9
  1285. transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +0 -1
  1286. transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +20 -23
  1287. transformers/models/speech_to_text/configuration_speech_to_text.py +0 -1
  1288. transformers/models/speech_to_text/feature_extraction_speech_to_text.py +10 -13
  1289. transformers/models/speech_to_text/modeling_speech_to_text.py +62 -54
  1290. transformers/models/speech_to_text/processing_speech_to_text.py +4 -30
  1291. transformers/models/speech_to_text/tokenization_speech_to_text.py +5 -6
  1292. transformers/models/speecht5/configuration_speecht5.py +0 -1
  1293. transformers/models/speecht5/feature_extraction_speecht5.py +16 -37
  1294. transformers/models/speecht5/modeling_speecht5.py +200 -174
  1295. transformers/models/speecht5/number_normalizer.py +0 -1
  1296. transformers/models/speecht5/processing_speecht5.py +3 -37
  1297. transformers/models/speecht5/tokenization_speecht5.py +4 -5
  1298. transformers/models/splinter/configuration_splinter.py +0 -1
  1299. transformers/models/splinter/modeling_splinter.py +63 -59
  1300. transformers/models/splinter/tokenization_splinter.py +2 -4
  1301. transformers/models/squeezebert/configuration_squeezebert.py +0 -1
  1302. transformers/models/squeezebert/modeling_squeezebert.py +62 -62
  1303. transformers/models/squeezebert/tokenization_squeezebert.py +0 -1
  1304. transformers/models/stablelm/configuration_stablelm.py +20 -23
  1305. transformers/models/stablelm/modeling_stablelm.py +40 -43
  1306. transformers/models/starcoder2/configuration_starcoder2.py +19 -22
  1307. transformers/models/starcoder2/modeling_starcoder2.py +34 -37
  1308. transformers/models/starcoder2/modular_starcoder2.py +13 -15
  1309. transformers/models/superglue/configuration_superglue.py +3 -3
  1310. transformers/models/superglue/image_processing_superglue.py +15 -15
  1311. transformers/models/superglue/image_processing_superglue_fast.py +5 -7
  1312. transformers/models/superglue/modeling_superglue.py +32 -33
  1313. transformers/models/superpoint/image_processing_superpoint.py +15 -15
  1314. transformers/models/superpoint/image_processing_superpoint_fast.py +5 -7
  1315. transformers/models/superpoint/modeling_superpoint.py +13 -14
  1316. transformers/models/swiftformer/configuration_swiftformer.py +0 -1
  1317. transformers/models/swiftformer/modeling_swiftformer.py +16 -14
  1318. transformers/models/swin/configuration_swin.py +0 -1
  1319. transformers/models/swin/modeling_swin.py +74 -82
  1320. transformers/models/swin2sr/configuration_swin2sr.py +0 -1
  1321. transformers/models/swin2sr/image_processing_swin2sr.py +10 -13
  1322. transformers/models/swin2sr/image_processing_swin2sr_fast.py +2 -6
  1323. transformers/models/swin2sr/modeling_swin2sr.py +75 -61
  1324. transformers/models/swinv2/configuration_swinv2.py +0 -1
  1325. transformers/models/swinv2/modeling_swinv2.py +96 -100
  1326. transformers/models/switch_transformers/configuration_switch_transformers.py +0 -1
  1327. transformers/models/switch_transformers/modeling_switch_transformers.py +34 -41
  1328. transformers/models/switch_transformers/modular_switch_transformers.py +31 -38
  1329. transformers/models/t5/configuration_t5.py +7 -2
  1330. transformers/models/t5/modeling_t5.py +76 -84
  1331. transformers/models/t5/tokenization_t5.py +1 -3
  1332. transformers/models/t5gemma/configuration_t5gemma.py +33 -34
  1333. transformers/models/t5gemma/modeling_t5gemma.py +97 -100
  1334. transformers/models/t5gemma/modular_t5gemma.py +117 -118
  1335. transformers/models/t5gemma2/configuration_t5gemma2.py +59 -96
  1336. transformers/models/t5gemma2/modeling_t5gemma2.py +109 -103
  1337. transformers/models/t5gemma2/modular_t5gemma2.py +375 -91
  1338. transformers/models/table_transformer/configuration_table_transformer.py +1 -2
  1339. transformers/models/table_transformer/modeling_table_transformer.py +47 -49
  1340. transformers/models/tapas/configuration_tapas.py +0 -1
  1341. transformers/models/tapas/modeling_tapas.py +64 -66
  1342. transformers/models/tapas/tokenization_tapas.py +115 -153
  1343. transformers/models/textnet/configuration_textnet.py +0 -1
  1344. transformers/models/textnet/image_processing_textnet.py +22 -25
  1345. transformers/models/textnet/image_processing_textnet_fast.py +5 -7
  1346. transformers/models/textnet/modeling_textnet.py +13 -14
  1347. transformers/models/time_series_transformer/configuration_time_series_transformer.py +5 -8
  1348. transformers/models/time_series_transformer/modeling_time_series_transformer.py +79 -81
  1349. transformers/models/timesfm/configuration_timesfm.py +0 -1
  1350. transformers/models/timesfm/modeling_timesfm.py +29 -19
  1351. transformers/models/timesfm/modular_timesfm.py +28 -18
  1352. transformers/models/timesformer/configuration_timesformer.py +0 -1
  1353. transformers/models/timesformer/modeling_timesformer.py +13 -16
  1354. transformers/models/timm_backbone/configuration_timm_backbone.py +0 -1
  1355. transformers/models/timm_backbone/modeling_timm_backbone.py +17 -15
  1356. transformers/models/timm_wrapper/configuration_timm_wrapper.py +5 -3
  1357. transformers/models/timm_wrapper/image_processing_timm_wrapper.py +4 -5
  1358. transformers/models/timm_wrapper/modeling_timm_wrapper.py +32 -28
  1359. transformers/models/trocr/configuration_trocr.py +0 -1
  1360. transformers/models/trocr/modeling_trocr.py +39 -42
  1361. transformers/models/trocr/processing_trocr.py +5 -25
  1362. transformers/models/tvp/configuration_tvp.py +5 -2
  1363. transformers/models/tvp/image_processing_tvp.py +50 -52
  1364. transformers/models/tvp/image_processing_tvp_fast.py +9 -10
  1365. transformers/models/tvp/modeling_tvp.py +25 -27
  1366. transformers/models/tvp/processing_tvp.py +2 -14
  1367. transformers/models/udop/configuration_udop.py +1 -1
  1368. transformers/models/udop/modeling_udop.py +63 -70
  1369. transformers/models/udop/processing_udop.py +7 -26
  1370. transformers/models/udop/tokenization_udop.py +80 -93
  1371. transformers/models/umt5/configuration_umt5.py +2 -3
  1372. transformers/models/umt5/modeling_umt5.py +80 -87
  1373. transformers/models/unispeech/configuration_unispeech.py +0 -1
  1374. transformers/models/unispeech/modeling_unispeech.py +47 -49
  1375. transformers/models/unispeech/modular_unispeech.py +20 -22
  1376. transformers/models/unispeech_sat/configuration_unispeech_sat.py +0 -1
  1377. transformers/models/unispeech_sat/modeling_unispeech_sat.py +63 -65
  1378. transformers/models/unispeech_sat/modular_unispeech_sat.py +21 -23
  1379. transformers/models/univnet/feature_extraction_univnet.py +14 -14
  1380. transformers/models/univnet/modeling_univnet.py +7 -8
  1381. transformers/models/upernet/configuration_upernet.py +0 -1
  1382. transformers/models/upernet/modeling_upernet.py +10 -13
  1383. transformers/models/vaultgemma/__init__.py +0 -1
  1384. transformers/models/vaultgemma/configuration_vaultgemma.py +24 -26
  1385. transformers/models/vaultgemma/modeling_vaultgemma.py +35 -37
  1386. transformers/models/vaultgemma/modular_vaultgemma.py +29 -31
  1387. transformers/models/video_llama_3/image_processing_video_llama_3.py +43 -42
  1388. transformers/models/video_llama_3/image_processing_video_llama_3_fast.py +8 -8
  1389. transformers/models/video_llama_3/modeling_video_llama_3.py +77 -66
  1390. transformers/models/video_llama_3/modular_video_llama_3.py +110 -112
  1391. transformers/models/video_llama_3/processing_video_llama_3.py +5 -39
  1392. transformers/models/video_llama_3/video_processing_video_llama_3.py +18 -18
  1393. transformers/models/video_llava/configuration_video_llava.py +0 -1
  1394. transformers/models/video_llava/image_processing_video_llava.py +35 -38
  1395. transformers/models/video_llava/modeling_video_llava.py +59 -57
  1396. transformers/models/video_llava/processing_video_llava.py +38 -78
  1397. transformers/models/video_llava/video_processing_video_llava.py +0 -1
  1398. transformers/models/videomae/configuration_videomae.py +0 -1
  1399. transformers/models/videomae/image_processing_videomae.py +31 -34
  1400. transformers/models/videomae/modeling_videomae.py +13 -15
  1401. transformers/models/videomae/video_processing_videomae.py +0 -1
  1402. transformers/models/vilt/configuration_vilt.py +2 -3
  1403. transformers/models/vilt/image_processing_vilt.py +29 -30
  1404. transformers/models/vilt/image_processing_vilt_fast.py +9 -10
  1405. transformers/models/vilt/modeling_vilt.py +83 -78
  1406. transformers/models/vilt/processing_vilt.py +2 -14
  1407. transformers/models/vipllava/configuration_vipllava.py +0 -1
  1408. transformers/models/vipllava/modeling_vipllava.py +45 -42
  1409. transformers/models/vipllava/modular_vipllava.py +30 -32
  1410. transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py +0 -1
  1411. transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +18 -21
  1412. transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py +0 -1
  1413. transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +18 -21
  1414. transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py +2 -16
  1415. transformers/models/visual_bert/configuration_visual_bert.py +0 -1
  1416. transformers/models/visual_bert/modeling_visual_bert.py +92 -92
  1417. transformers/models/vit/configuration_vit.py +0 -1
  1418. transformers/models/vit/image_processing_vit.py +19 -22
  1419. transformers/models/vit/image_processing_vit_fast.py +0 -1
  1420. transformers/models/vit/modeling_vit.py +13 -15
  1421. transformers/models/vit_mae/configuration_vit_mae.py +0 -1
  1422. transformers/models/vit_mae/modeling_vit_mae.py +21 -23
  1423. transformers/models/vit_msn/configuration_vit_msn.py +0 -1
  1424. transformers/models/vit_msn/modeling_vit_msn.py +10 -12
  1425. transformers/models/vitdet/configuration_vitdet.py +0 -1
  1426. transformers/models/vitdet/modeling_vitdet.py +12 -14
  1427. transformers/models/vitmatte/configuration_vitmatte.py +2 -5
  1428. transformers/models/vitmatte/image_processing_vitmatte.py +15 -18
  1429. transformers/models/vitmatte/image_processing_vitmatte_fast.py +14 -16
  1430. transformers/models/vitmatte/modeling_vitmatte.py +13 -11
  1431. transformers/models/vitpose/configuration_vitpose.py +4 -7
  1432. transformers/models/vitpose/image_processing_vitpose.py +24 -25
  1433. transformers/models/vitpose/image_processing_vitpose_fast.py +9 -11
  1434. transformers/models/vitpose/modeling_vitpose.py +10 -12
  1435. transformers/models/vitpose_backbone/configuration_vitpose_backbone.py +0 -1
  1436. transformers/models/vitpose_backbone/modeling_vitpose_backbone.py +8 -10
  1437. transformers/models/vits/configuration_vits.py +0 -1
  1438. transformers/models/vits/modeling_vits.py +34 -35
  1439. transformers/models/vits/tokenization_vits.py +3 -4
  1440. transformers/models/vivit/configuration_vivit.py +0 -1
  1441. transformers/models/vivit/image_processing_vivit.py +36 -39
  1442. transformers/models/vivit/modeling_vivit.py +5 -7
  1443. transformers/models/vjepa2/__init__.py +0 -1
  1444. transformers/models/vjepa2/configuration_vjepa2.py +0 -1
  1445. transformers/models/vjepa2/modeling_vjepa2.py +30 -32
  1446. transformers/models/vjepa2/video_processing_vjepa2.py +0 -1
  1447. transformers/models/voxtral/__init__.py +0 -1
  1448. transformers/models/voxtral/configuration_voxtral.py +0 -1
  1449. transformers/models/voxtral/modeling_voxtral.py +19 -27
  1450. transformers/models/voxtral/modular_voxtral.py +12 -21
  1451. transformers/models/voxtral/processing_voxtral.py +25 -48
  1452. transformers/models/wav2vec2/configuration_wav2vec2.py +0 -1
  1453. transformers/models/wav2vec2/feature_extraction_wav2vec2.py +7 -10
  1454. transformers/models/wav2vec2/modeling_wav2vec2.py +67 -122
  1455. transformers/models/wav2vec2/processing_wav2vec2.py +6 -35
  1456. transformers/models/wav2vec2/tokenization_wav2vec2.py +20 -332
  1457. transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py +0 -1
  1458. transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +65 -62
  1459. transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py +52 -48
  1460. transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py +6 -35
  1461. transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +0 -1
  1462. transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +84 -77
  1463. transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py +37 -30
  1464. transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py +16 -17
  1465. transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py +36 -55
  1466. transformers/models/wavlm/configuration_wavlm.py +0 -1
  1467. transformers/models/wavlm/modeling_wavlm.py +45 -48
  1468. transformers/models/wavlm/modular_wavlm.py +4 -5
  1469. transformers/models/whisper/configuration_whisper.py +0 -1
  1470. transformers/models/whisper/english_normalizer.py +3 -4
  1471. transformers/models/whisper/feature_extraction_whisper.py +9 -24
  1472. transformers/models/whisper/generation_whisper.py +27 -48
  1473. transformers/models/whisper/modeling_whisper.py +73 -73
  1474. transformers/models/whisper/processing_whisper.py +3 -20
  1475. transformers/models/whisper/tokenization_whisper.py +9 -30
  1476. transformers/models/x_clip/configuration_x_clip.py +0 -1
  1477. transformers/models/x_clip/modeling_x_clip.py +70 -69
  1478. transformers/models/x_clip/processing_x_clip.py +2 -14
  1479. transformers/models/xcodec/configuration_xcodec.py +4 -6
  1480. transformers/models/xcodec/modeling_xcodec.py +20 -17
  1481. transformers/models/xglm/configuration_xglm.py +0 -1
  1482. transformers/models/xglm/modeling_xglm.py +59 -55
  1483. transformers/models/xglm/tokenization_xglm.py +1 -4
  1484. transformers/models/xlm/configuration_xlm.py +0 -1
  1485. transformers/models/xlm/modeling_xlm.py +139 -144
  1486. transformers/models/xlm/tokenization_xlm.py +3 -5
  1487. transformers/models/xlm_roberta/configuration_xlm_roberta.py +0 -1
  1488. transformers/models/xlm_roberta/modeling_xlm_roberta.py +195 -194
  1489. transformers/models/xlm_roberta/modular_xlm_roberta.py +50 -53
  1490. transformers/models/xlm_roberta/tokenization_xlm_roberta.py +1 -4
  1491. transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +0 -1
  1492. transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +94 -93
  1493. transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py +67 -70
  1494. transformers/models/xlnet/configuration_xlnet.py +0 -11
  1495. transformers/models/xlnet/modeling_xlnet.py +152 -163
  1496. transformers/models/xlnet/tokenization_xlnet.py +1 -4
  1497. transformers/models/xlstm/configuration_xlstm.py +3 -5
  1498. transformers/models/xlstm/modeling_xlstm.py +62 -65
  1499. transformers/models/xmod/configuration_xmod.py +0 -1
  1500. transformers/models/xmod/modeling_xmod.py +101 -100
  1501. transformers/models/yolos/configuration_yolos.py +0 -1
  1502. transformers/models/yolos/image_processing_yolos.py +60 -62
  1503. transformers/models/yolos/image_processing_yolos_fast.py +18 -18
  1504. transformers/models/yolos/modeling_yolos.py +12 -14
  1505. transformers/models/yolos/modular_yolos.py +2 -4
  1506. transformers/models/yoso/configuration_yoso.py +0 -1
  1507. transformers/models/yoso/modeling_yoso.py +64 -63
  1508. transformers/models/zamba/configuration_zamba.py +0 -1
  1509. transformers/models/zamba/modeling_zamba.py +70 -70
  1510. transformers/models/zamba2/configuration_zamba2.py +36 -37
  1511. transformers/models/zamba2/modeling_zamba2.py +87 -89
  1512. transformers/models/zamba2/modular_zamba2.py +43 -45
  1513. transformers/models/zoedepth/configuration_zoedepth.py +1 -2
  1514. transformers/models/zoedepth/image_processing_zoedepth.py +28 -29
  1515. transformers/models/zoedepth/image_processing_zoedepth_fast.py +12 -15
  1516. transformers/models/zoedepth/modeling_zoedepth.py +21 -16
  1517. transformers/pipelines/__init__.py +59 -55
  1518. transformers/pipelines/any_to_any.py +14 -22
  1519. transformers/pipelines/audio_utils.py +1 -2
  1520. transformers/pipelines/automatic_speech_recognition.py +20 -12
  1521. transformers/pipelines/base.py +13 -17
  1522. transformers/pipelines/deprecated/__init__.py +0 -1
  1523. transformers/pipelines/document_question_answering.py +1 -1
  1524. transformers/pipelines/image_text_to_text.py +0 -1
  1525. transformers/pipelines/image_to_text.py +4 -44
  1526. transformers/pipelines/question_answering.py +5 -44
  1527. transformers/pipelines/text_classification.py +1 -14
  1528. transformers/pipelines/text_to_audio.py +2 -2
  1529. transformers/pipelines/token_classification.py +1 -22
  1530. transformers/pipelines/video_classification.py +1 -9
  1531. transformers/pipelines/zero_shot_audio_classification.py +0 -1
  1532. transformers/pipelines/zero_shot_classification.py +0 -6
  1533. transformers/pipelines/zero_shot_image_classification.py +0 -7
  1534. transformers/processing_utils.py +222 -151
  1535. transformers/quantizers/auto.py +2 -4
  1536. transformers/quantizers/base.py +19 -64
  1537. transformers/quantizers/quantizer_aqlm.py +1 -18
  1538. transformers/quantizers/quantizer_auto_round.py +1 -10
  1539. transformers/quantizers/quantizer_awq.py +3 -8
  1540. transformers/quantizers/quantizer_bitnet.py +1 -6
  1541. transformers/quantizers/quantizer_bnb_4bit.py +9 -49
  1542. transformers/quantizers/quantizer_bnb_8bit.py +9 -19
  1543. transformers/quantizers/quantizer_compressed_tensors.py +1 -4
  1544. transformers/quantizers/quantizer_eetq.py +2 -12
  1545. transformers/quantizers/quantizer_fbgemm_fp8.py +5 -14
  1546. transformers/quantizers/quantizer_finegrained_fp8.py +15 -10
  1547. transformers/quantizers/quantizer_fp_quant.py +4 -4
  1548. transformers/quantizers/quantizer_gptq.py +1 -4
  1549. transformers/quantizers/quantizer_higgs.py +2 -6
  1550. transformers/quantizers/quantizer_mxfp4.py +2 -28
  1551. transformers/quantizers/quantizer_quanto.py +14 -14
  1552. transformers/quantizers/quantizer_quark.py +0 -1
  1553. transformers/quantizers/quantizer_spqr.py +3 -8
  1554. transformers/quantizers/quantizer_torchao.py +31 -127
  1555. transformers/quantizers/quantizer_vptq.py +1 -10
  1556. transformers/testing_utils.py +31 -49
  1557. transformers/tokenization_mistral_common.py +554 -902
  1558. transformers/tokenization_utils_base.py +112 -124
  1559. transformers/tokenization_utils_sentencepiece.py +5 -6
  1560. transformers/tokenization_utils_tokenizers.py +30 -7
  1561. transformers/trainer.py +30 -11
  1562. transformers/trainer_callback.py +8 -0
  1563. transformers/trainer_jit_checkpoint.py +1 -2
  1564. transformers/trainer_seq2seq.py +4 -0
  1565. transformers/training_args.py +11 -13
  1566. transformers/utils/__init__.py +4 -0
  1567. transformers/utils/attention_visualizer.py +5 -5
  1568. transformers/utils/auto_docstring.py +598 -37
  1569. transformers/utils/doc.py +1 -1
  1570. transformers/utils/dummy_pt_objects.py +0 -42
  1571. transformers/utils/generic.py +21 -1
  1572. transformers/utils/import_utils.py +51 -9
  1573. transformers/utils/kernel_config.py +71 -18
  1574. transformers/utils/loading_report.py +3 -3
  1575. transformers/utils/quantization_config.py +16 -18
  1576. transformers/video_processing_utils.py +35 -32
  1577. transformers/video_utils.py +18 -22
  1578. {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc3.dist-info}/METADATA +23 -24
  1579. transformers-5.0.0rc3.dist-info/RECORD +2067 -0
  1580. transformers-5.0.0rc1.dist-info/RECORD +0 -2003
  1581. {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc3.dist-info}/WHEEL +0 -0
  1582. {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc3.dist-info}/entry_points.txt +0 -0
  1583. {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc3.dist-info}/licenses/LICENSE +0 -0
  1584. {transformers-5.0.0rc1.dist-info → transformers-5.0.0rc3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1895 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_ernie4_5_vl_moe.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+
21
+ import itertools
22
+ from collections.abc import Callable
23
+ from typing import Any, Optional
24
+
25
+ import torch
26
+ import torch.nn as nn
27
+ import torch.nn.functional as F
28
+
29
+ from ... import initialization as init
30
+ from ...activations import ACT2FN
31
+ from ...cache_utils import Cache, DynamicCache
32
+ from ...generation import GenerationMixin
33
+ from ...integrations import use_experts_implementation, use_kernel_forward_from_hub, use_kernelized_func
34
+ from ...masking_utils import create_causal_mask
35
+ from ...modeling_flash_attention_utils import FlashAttentionKwargs
36
+ from ...modeling_layers import GradientCheckpointingLayer
37
+ from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
38
+ from ...modeling_rope_utils import dynamic_rope_update
39
+ from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
40
+ from ...processing_utils import Unpack
41
+ from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torchdynamo_compiling
42
+ from ...utils.generic import OutputRecorder, check_model_inputs, maybe_autocast
43
+ from .configuration_ernie4_5_vl_moe import (
44
+ Ernie4_5_VL_MoeConfig,
45
+ Ernie4_5_VL_MoeTextConfig,
46
+ Ernie4_5_VL_MoeVisionConfig,
47
+ )
48
+
49
+
50
+ class Ernie4_5_VL_MoeTextRotaryEmbedding(nn.Module):
51
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
52
+
53
+ def __init__(self, config, device=None):
54
+ super().__init__()
55
+ self.max_seq_len_cached = config.max_position_embeddings
56
+ self.original_max_seq_len = config.max_position_embeddings
57
+
58
+ self.config = config
59
+
60
+ self.rope_type = self.config.rope_parameters["rope_type"]
61
+ rope_init_fn: Callable = self.compute_default_rope_parameters
62
+ if self.rope_type != "default":
63
+ raise ValueError(f"Ernie 4.5 VL requires the `default` rope type, but found {self.rope_type} instead.")
64
+ inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
65
+
66
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
67
+ self.original_inv_freq = inv_freq
68
+
69
+ self.mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20])
70
+
71
+ @staticmethod
72
+ def compute_default_rope_parameters(
73
+ config: Ernie4_5_VL_MoeTextConfig | None = None,
74
+ device: Optional["torch.device"] = None,
75
+ seq_len: int | None = None,
76
+ ) -> tuple["torch.Tensor", float]:
77
+ """
78
+ Computes the inverse frequencies according to the original RoPE implementation
79
+ Args:
80
+ config ([`~transformers.PreTrainedConfig`]):
81
+ The model configuration.
82
+ device (`torch.device`):
83
+ The device to use for initialization of the inverse frequencies.
84
+ seq_len (`int`, *optional*):
85
+ The current sequence length. Unused for this type of RoPE.
86
+ Returns:
87
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
88
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
89
+ """
90
+ base = config.rope_parameters["rope_theta"]
91
+ dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
92
+
93
+ attention_factor = 1.0 # Unused in this type of RoPE
94
+
95
+ # Compute the inverse frequencies
96
+ inv_freq = 1.0 / (
97
+ base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
98
+ )
99
+
100
+ # Special to ernie, we prerotate on the hw dim
101
+ mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20])
102
+ hw_dim = mrope_section[0] + mrope_section[1]
103
+ t_dim = mrope_section[2]
104
+
105
+ inv_freq_3d = torch.empty_like(inv_freq)
106
+ # (Pre-)Rotate to avoid another rotation during the forward
107
+ inv_freq_3d[:hw_dim] = torch.cat([inv_freq[:-t_dim][0::2], inv_freq[:-t_dim][1::2]])
108
+ inv_freq_3d[-t_dim:] = inv_freq[-t_dim:]
109
+
110
+ return inv_freq_3d, attention_factor
111
+
112
+ @torch.no_grad()
113
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
114
+ def forward(self, x, position_ids):
115
+ inv_freq_expanded = (
116
+ self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1).to(x.device)
117
+ )
118
+ position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions)
119
+
120
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
121
+ with maybe_autocast(device_type=device_type, enabled=False): # Force float32
122
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
123
+ cos = freqs.cos() * self.attention_scaling
124
+ sin = freqs.sin() * self.attention_scaling
125
+
126
+ sin = self.recomposition_to_3d(sin)
127
+ cos = self.recomposition_to_3d(cos)
128
+
129
+ return cos, sin
130
+
131
+ def recomposition_to_3d(self, freq):
132
+ freq_h, freq_w, freq_t = (m[(i + 1) % 3] for i, m in enumerate(freq.split([*self.mrope_section], dim=-1)))
133
+ freq_hw = torch.stack([freq_h, freq_w], dim=-1).flatten(-2)
134
+ freq_hwt = torch.cat([freq_hw, freq_t], dim=-1)
135
+ return freq_hwt.repeat_interleave(2, dim=-1)
136
+
137
+
138
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
139
+ """
140
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
141
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
142
+ """
143
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
144
+ if n_rep == 1:
145
+ return hidden_states
146
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
147
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
148
+
149
+
150
+ def eager_attention_forward(
151
+ module: nn.Module,
152
+ query: torch.Tensor,
153
+ key: torch.Tensor,
154
+ value: torch.Tensor,
155
+ attention_mask: torch.Tensor | None,
156
+ scaling: float,
157
+ dropout: float = 0.0,
158
+ **kwargs: Unpack[TransformersKwargs],
159
+ ):
160
+ key_states = repeat_kv(key, module.num_key_value_groups)
161
+ value_states = repeat_kv(value, module.num_key_value_groups)
162
+
163
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
164
+ if attention_mask is not None:
165
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
166
+ attn_weights = attn_weights + causal_mask
167
+
168
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
169
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
170
+ attn_output = torch.matmul(attn_weights, value_states)
171
+ attn_output = attn_output.transpose(1, 2).contiguous()
172
+
173
+ return attn_output, attn_weights
174
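# Usage sketch (assumption): the eager path only needs a module exposing
# `num_key_value_groups` (plus the inherited `training` flag); shapes follow the
# (batch, heads, seq, head_dim) layout used throughout this file.
import torch
import torch.nn as nn

class _DummyAttention(nn.Module):
    num_key_value_groups = 2

module = _DummyAttention()
q = torch.randn(1, 8, 16, 64)    # (batch, num_attention_heads, seq, head_dim)
k = torch.randn(1, 4, 16, 64)    # (batch, num_key_value_heads, seq, head_dim)
v = torch.randn(1, 4, 16, 64)
out, weights = eager_attention_forward(module, q, k, v, attention_mask=None, scaling=64**-0.5)
# out: (1, 16, 8, 64) after the final transpose, weights: (1, 8, 16, 16)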
+
175
+
176
+ def rotate_half_text(x):
177
+ """Rotates half the hidden dims of the input."""
178
+ x1 = x[..., 0::2]
179
+ x2 = x[..., 1::2]
180
+ return torch.stack((-x2, x1), dim=-1).flatten(-2)
181
+
182
+
183
+ def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
184
+ """Applies Rotary Position Embedding to the query and key tensors.
185
+
186
+ Args:
187
+ q (`torch.Tensor`): The query tensor.
188
+ k (`torch.Tensor`): The key tensor.
189
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
190
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
191
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
192
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
193
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
194
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
195
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
196
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
197
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
198
+ Returns:
199
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
200
+ """
201
+ original_dtype = q.dtype
202
+
203
+ cos = cos.unsqueeze(unsqueeze_dim)
204
+ sin = sin.unsqueeze(unsqueeze_dim)
205
+
206
+ q_embed = (q.float() * cos) + (rotate_half_text(q).float() * sin)
207
+ k_embed = (k.float() * cos) + (rotate_half_text(k).float() * sin)
208
+
209
+ return q_embed.to(original_dtype), k_embed.to(original_dtype)
210
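# Usage sketch (assumption): with the default unsqueeze_dim=1, cos/sin of shape
# (batch, seq, head_dim) broadcast over the head dimension of q/k shaped
# (batch, heads, seq, head_dim). Random cos/sin below stand in for real rotary tables.
import torch

q = torch.randn(1, 8, 16, 64)
k = torch.randn(1, 8, 16, 64)
cos = torch.randn(1, 16, 64)
sin = torch.randn(1, 16, 64)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)   # shapes are preserved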
+
211
+
212
+ @use_kernelized_func(apply_rotary_pos_emb)
213
+ class Ernie4_5_VL_MoeTextAttention(nn.Module):
214
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
215
+
216
+ def __init__(self, config: Ernie4_5_VL_MoeConfig, layer_idx: int):
217
+ super().__init__()
218
+ self.config = config
219
+ self.layer_idx = layer_idx
220
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
221
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
222
+ self.scaling = self.head_dim**-0.5
223
+
224
+ self.attention_dropout = 0.0
225
+ self.is_causal = True
226
+
227
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
228
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
229
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
230
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
231
+
232
+ def forward(
233
+ self,
234
+ hidden_states: torch.Tensor,
235
+ position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
236
+ attention_mask: torch.Tensor | None = None,
237
+ past_key_values: Cache | None = None,
238
+ cache_position: torch.LongTensor | None = None,
239
+ **kwargs: Unpack[TransformersKwargs],
240
+ ) -> tuple[torch.Tensor, torch.Tensor]:
241
+ input_shape = hidden_states.shape[:-1]
242
+ hidden_shape = (*input_shape, -1, self.head_dim)
243
+
244
+ query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
245
+ key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
246
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
247
+
248
+ cos, sin = position_embeddings
249
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
250
+
251
+ if past_key_values is not None:
252
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
253
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
254
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
255
+
256
+ attention_interface: Callable = eager_attention_forward
257
+ if self.config._attn_implementation != "eager":
258
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
259
+
260
+ attn_output, attn_weights = attention_interface(
261
+ self,
262
+ query_states,
263
+ key_states,
264
+ value_states,
265
+ attention_mask,
266
+ dropout=0.0 if not self.training else self.attention_dropout,
267
+ scaling=self.scaling,
268
+ **kwargs,
269
+ )
270
+
271
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
272
+ attn_output = self.o_proj(attn_output)
273
+ return attn_output, attn_weights
274
+
275
+
276
+ @use_kernel_forward_from_hub("RMSNorm")
277
+ class Ernie4_5_VL_MoeRMSNorm(nn.Module):
278
+ def __init__(self, hidden_size, eps=1e-6):
279
+ """
280
+ Ernie4_5_VL_MoeRMSNorm is equivalent to T5LayerNorm
281
+ """
282
+ super().__init__()
283
+ self.weight = nn.Parameter(torch.ones(hidden_size))
284
+ self.variance_epsilon = eps
285
+
286
+ def forward(self, hidden_states):
287
+ input_dtype = hidden_states.dtype
288
+ hidden_states = hidden_states.to(torch.float32)
289
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
290
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
291
+ return self.weight * hidden_states.to(input_dtype)
292
+
293
+ def extra_repr(self):
294
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
295
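# Equivalence sketch (assumption): RMSNorm rescales by the root-mean-square of the
# features with no mean subtraction, so with unit weights it reduces to the formula below.
import torch

x = torch.randn(2, 5, 8)
norm = Ernie4_5_VL_MoeRMSNorm(hidden_size=8, eps=1e-6)
manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
assert torch.allclose(norm(x), manual, atol=1e-6)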
+
296
+
297
+ class Ernie4_5_VL_MoeMLP(nn.Module):
298
+ def __init__(self, config, intermediate_size=None):
299
+ super().__init__()
300
+ self.config = config
301
+ self.hidden_size = config.hidden_size
302
+ self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
303
+
304
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
305
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
306
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
307
+ self.act_fn = ACT2FN[config.hidden_act]
308
+
309
+ def forward(self, x):
310
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
311
+ return down_proj
312
+
313
+
314
+ class Ernie4_5_VL_MoeMoeStatics(nn.Module):
315
+ """
316
+ Stores MoE (Mixture of Experts) statistics
317
+ - Bias for the gating
318
+ - Additionally, usage per expert in the original codebase
319
+ """
320
+
321
+ def __init__(self, config):
322
+ super().__init__()
323
+
324
+ num_experts_groups = 1
325
+ num_experts = config.moe_num_experts
326
+
327
+ self.e_score_correction_bias = nn.Parameter(
328
+ torch.zeros(num_experts_groups, num_experts, dtype=torch.float32),
329
+ requires_grad=False,
330
+ )
331
+
332
+ def forward(self, hidden_states):
333
+ # NOTE: This forward is a workaround to enable TP with a module that only holds parameters.
334
+ #
335
+ # Otherwise, the bias stays a `DTensor` when called in the "super" forward, because:
336
+ # 1. All other tensors are local (`torch.Tensor`)
337
+ # 2. Isolation does not work on an `nn.Module` that only has parameters
338
+ return hidden_states + self.e_score_correction_bias.squeeze()
339
+
340
+
341
+ class Ernie4_5_VL_MoeMoeTopKRouter(nn.Module):
342
+ def __init__(self, config):
343
+ super().__init__()
344
+ self.weight = nn.Parameter(torch.zeros(config.moe_num_experts, config.hidden_size, dtype=torch.float32))
345
+ self.moe_statics = Ernie4_5_VL_MoeMoeStatics(config)
346
+ self.top_k = config.moe_k
347
+ self.norm_min = config.moe_norm_min
348
+
349
+ def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
350
+ device_type = (
351
+ hidden_states.device.type
352
+ if isinstance(hidden_states.device.type, str) and hidden_states.device.type != "mps"
353
+ else "cpu"
354
+ )
355
+
356
+ with maybe_autocast(device_type=device_type, enabled=False): # Force float32
357
+ router_logits = F.linear(hidden_states.float(), self.weight.float())
358
+ routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
359
+ _, selected_experts = torch.topk(self.moe_statics(routing_weights), self.top_k, dim=-1)
360
+ routing_weights = torch.gather(routing_weights, dim=-1, index=selected_experts)
361
+ routing_weights = routing_weights / torch.clamp(
362
+ routing_weights.sum(dim=-1, keepdim=True), min=self.norm_min
363
+ )
364
+ routing_weights = routing_weights.to(hidden_states.dtype)
365
+ return router_logits, selected_experts, routing_weights
366
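# Routing sketch (assumption): the correction bias stored in `moe_statics` only affects
# which experts are selected; the mixing weights are gathered from the unbiased softmax
# and renormalized over the selected top-k.
import torch
import torch.nn.functional as F

logits = torch.randn(4, 8)                       # (tokens, num_experts)
bias = torch.zeros(8); bias[3] = 5.0             # strongly favour expert 3 for selection
probs = F.softmax(logits, dim=-1)
_, selected = torch.topk(probs + bias, k=2, dim=-1)
weights = torch.gather(probs, dim=-1, index=selected)
weights = weights / weights.sum(dim=-1, keepdim=True).clamp(min=1e-12)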
+
367
+
368
+ @use_experts_implementation
369
+ class Ernie4_5_VL_MoeMoeExperts(nn.Module):
370
+ """Collection of expert weights stored as 3D tensors."""
371
+
372
+ def __init__(self, config, intermediate_size=None):
373
+ super().__init__()
374
+ self.num_experts = config.moe_num_experts
375
+ self.hidden_dim = config.hidden_size
376
+ self.intermediate_dim = config.moe_intermediate_size if intermediate_size is None else intermediate_size
377
+ self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
378
+ self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
379
+ self.act_fn = ACT2FN[config.hidden_act]
380
+
381
+ def forward(
382
+ self,
383
+ hidden_states: torch.Tensor,
384
+ top_k_index: torch.Tensor,
385
+ top_k_weights: torch.Tensor,
386
+ ) -> torch.Tensor:
387
+ final_hidden_states = torch.zeros_like(hidden_states)
388
+ with torch.no_grad():
389
+ expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
390
+ expert_mask = expert_mask.permute(2, 1, 0)
391
+ expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
392
+
393
+ for expert_idx in expert_hit:
394
+ expert_idx = expert_idx[0]
395
+ if expert_idx == self.num_experts:
396
+ continue
397
+ top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
398
+ current_state = hidden_states[token_idx]
399
+ gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
400
+ current_hidden_states = self.act_fn(gate) * up
401
+ current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
402
+ current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
403
+ final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
404
+
405
+ return final_hidden_states
406
+
407
+
408
+ class Ernie4_5_VL_MoeSparseMoeBlock(nn.Module):
409
+ def __init__(self, config, intermediate_size):
410
+ super().__init__()
411
+ self.hidden_dim = config.hidden_size
412
+ self.num_experts = config.moe_num_experts
413
+ self.top_k = config.moe_k
414
+ self.gate = Ernie4_5_VL_MoeMoeTopKRouter(config)
415
+ self.experts = Ernie4_5_VL_MoeMoeExperts(config, intermediate_size)
416
+
417
+ def forward(
418
+ self,
419
+ hidden_states: torch.Tensor,
420
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
421
+ hidden_states = hidden_states.view(-1, self.hidden_dim)
422
+
423
+ router_logits, top_k_index, top_k_weights = self.gate(hidden_states)
424
+ final_hidden_states = self.experts(hidden_states, top_k_index, top_k_weights)
425
+
426
+ # MoE results are returned in a flattened shape to ease the modality-isolated assignment of results
427
+ return final_hidden_states.flatten(), router_logits.flatten()
428
+
429
+
430
+ class Ernie4_5_VL_MoeMoeBlock(nn.Module):
431
+ """
432
+ Similar to `Ernie4_5_Moe` where we have modality isolated experts:
433
+ - A set of text experts that are only run on text tokens
434
+ - A set of vision experts that are only run on vision (image/video) tokens
435
+
436
+ This modality isolation is unique to the Ernie 4.5 VL Moe models.
437
+ """
438
+
439
+ def __init__(self, config):
440
+ super().__init__()
441
+ self.num_experts = config.moe_num_experts
442
+
443
+ self.text_moe = Ernie4_5_VL_MoeSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[0])
444
+ self.vision_moe = Ernie4_5_VL_MoeSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[1])
445
+
446
+ self.shared_experts = None
447
+ if config.moe_num_shared_experts > 0:
448
+ self.shared_experts = Ernie4_5_VL_MoeMLP(
449
+ config, config.moe_intermediate_size[0] * config.moe_num_shared_experts
450
+ )
451
+
452
+ def forward(
453
+ self,
454
+ hidden_states: torch.Tensor,
455
+ moe_mm_token_type_ids: torch.IntTensor | None = None,
456
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
457
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
458
+
459
+ # (Optional) shared experts
460
+ if self.shared_experts is not None:
461
+ shared_output = self.shared_experts(hidden_states)
462
+
463
+ if moe_mm_token_type_ids is not None and moe_mm_token_type_ids.any():
464
+ final_hidden_states = torch.zeros_like(hidden_states)
465
+ router_logits = torch.zeros(
466
+ size=(batch_size * sequence_length, self.num_experts),
467
+ device=final_hidden_states.device,
468
+ dtype=torch.float,
469
+ )
470
+
471
+ # True (1 or 2) == vision, False (0) == text tokens
472
+ moe_mm_token_type_ids = moe_mm_token_type_ids.bool()
473
+ token_type_ids_router = moe_mm_token_type_ids.reshape(-1)[:, None].expand(-1, self.num_experts)
474
+ token_type_ids_states = moe_mm_token_type_ids[..., None].expand(-1, -1, hidden_dim)
475
+
476
+ # Run moe on each modality and assign their results to the original token positions
477
+ final_hidden_states[~token_type_ids_states], router_logits[~token_type_ids_router] = self.text_moe(
478
+ hidden_states[~token_type_ids_states]
479
+ )
480
+ final_hidden_states[token_type_ids_states], router_logits[token_type_ids_router] = self.vision_moe(
481
+ hidden_states[token_type_ids_states]
482
+ )
483
+ else:
484
+ final_hidden_states, router_logits = self.text_moe(hidden_states)
485
+ final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
486
+ router_logits = router_logits.reshape(-1, self.num_experts)
487
+
488
+ # Add (optional) shared experts to the result
489
+ if self.shared_experts is not None:
490
+ final_hidden_states = final_hidden_states + shared_output
491
+
492
+ return final_hidden_states, router_logits
493
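# Modality-isolation sketch (assumption): boolean masks split text and vision tokens
# between the two expert pools, and the flattened per-modality results are scattered
# back into their original token positions.
import torch

hidden = torch.randn(2, 6, 4)                              # (batch, seq, hidden)
is_vision = torch.tensor([[0, 0, 1, 1, 0, 0],
                          [1, 1, 1, 0, 0, 0]]).bool()
mask = is_vision[..., None].expand_as(hidden)
out = torch.zeros_like(hidden)
out[~mask] = hidden[~mask] * 2.0    # stand-in for the text expert pool
out[mask] = hidden[mask] * -1.0     # stand-in for the vision expert pool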
+
494
+
495
+ class Ernie4_5_VL_MoeDecoderLayer(GradientCheckpointingLayer):
496
+ def __init__(self, config, layer_idx):
497
+ super().__init__()
498
+ self.hidden_size = config.hidden_size
499
+
500
+ self.self_attn = Ernie4_5_VL_MoeTextAttention(config, layer_idx)
501
+
502
+ if config.mlp_layer_types[layer_idx] == "sparse":
503
+ self.mlp = Ernie4_5_VL_MoeMoeBlock(config)
504
+ else:
505
+ self.mlp = Ernie4_5_VL_MoeMLP(config)
506
+
507
+ self.input_layernorm = Ernie4_5_VL_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
508
+ self.post_attention_layernorm = Ernie4_5_VL_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
509
+
510
+ def forward(
511
+ self,
512
+ hidden_states: torch.Tensor,
513
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
514
+ attention_mask: torch.Tensor | None = None,
515
+ position_ids: torch.Tensor | None = None,
516
+ moe_mm_token_type_ids: torch.IntTensor | None = None,
517
+ past_key_values: Cache | None = None,
518
+ cache_position: torch.LongTensor | None = None,
519
+ **kwargs: Unpack[FlashAttentionKwargs],
520
+ ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] | None]:
521
+ residual = hidden_states
522
+
523
+ hidden_states = self.input_layernorm(hidden_states)
524
+
525
+ # Self Attention
526
+ hidden_states, _ = self.self_attn(
527
+ hidden_states=hidden_states,
528
+ position_embeddings=position_embeddings,
529
+ attention_mask=attention_mask,
530
+ position_ids=position_ids,
531
+ past_key_values=past_key_values,
532
+ cache_position=cache_position,
533
+ **kwargs,
534
+ )
535
+ hidden_states = hidden_states + residual
536
+
537
+ # Fully Connected
538
+ residual = hidden_states
539
+ hidden_states = self.post_attention_layernorm(hidden_states)
540
+ if isinstance(self.mlp, Ernie4_5_VL_MoeMoeBlock):
541
+ hidden_states, _ = self.mlp(hidden_states, moe_mm_token_type_ids)
542
+ else:
543
+ hidden_states = self.mlp(hidden_states)
544
+ hidden_states = hidden_states + residual
545
+
546
+ return hidden_states
547
+
548
+
549
+ @auto_docstring
550
+ class Ernie4_5_VL_MoePreTrainedModel(PreTrainedModel):
551
+ config: Ernie4_5_VL_MoeConfig
552
+ base_model_prefix = "model"
553
+ input_modalities = ("image", "video", "text")
554
+ supports_gradient_checkpointing = True
555
+ _no_split_modules = ["Ernie4_5_VL_MoeDecoderLayer", "Ernie4_5_VL_MoeVisionBlock"]
556
+ _skip_keys_device_placement = "past_key_values"
557
+ _supports_flash_attn = True
558
+ _supports_sdpa = True
559
+ _can_compile_fullgraph = False
560
+ _supports_attention_backend = True
561
+
562
+ _can_record_outputs = {
563
+ "router_logits": OutputRecorder(Ernie4_5_VL_MoeMoeBlock, index=1),
564
+ "hidden_states": Ernie4_5_VL_MoeDecoderLayer,
565
+ "attentions": Ernie4_5_VL_MoeTextAttention,
566
+ }
567
+ _keep_in_fp32_modules_strict = ["gate.weight", "moe_statics"]
568
+
569
+ def _init_weights(self, module):
570
+ super()._init_weights(module)
571
+ if isinstance(module, Ernie4_5_VL_MoeMoeTopKRouter):
572
+ init.zeros_(module.moe_statics.e_score_correction_bias)
573
+ init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
574
+ elif isinstance(module, Ernie4_5_VL_MoeMoeExperts):
575
+ init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
576
+ init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
577
+ elif isinstance(module, Ernie4_5_VL_MoeVisionRotaryEmbedding):
578
+ inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
579
+ init.copy_(module.inv_freq, inv_freq)
580
+
581
+
582
+ @auto_docstring
583
+ class Ernie4_5_VL_MoeTextModel(Ernie4_5_VL_MoePreTrainedModel):
584
+ config: Ernie4_5_VL_MoeTextConfig
585
+
586
+ def __init__(self, config: Ernie4_5_VL_MoeTextConfig):
587
+ super().__init__(config)
588
+ self.padding_idx = config.pad_token_id
589
+ self.vocab_size = config.vocab_size
590
+
591
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
592
+ self.layers = nn.ModuleList(
593
+ [Ernie4_5_VL_MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
594
+ )
595
+ self.norm = Ernie4_5_VL_MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
596
+ self.rotary_emb = Ernie4_5_VL_MoeTextRotaryEmbedding(config=config)
597
+ self.gradient_checkpointing = False
598
+
599
+ # Initialize weights and apply final processing
600
+ self.post_init()
601
+
602
+ @check_model_inputs
603
+ @auto_docstring
604
+ def forward(
605
+ self,
606
+ input_ids: torch.LongTensor | None = None,
607
+ attention_mask: torch.Tensor | None = None,
608
+ position_ids: torch.LongTensor | None = None,
609
+ moe_mm_token_type_ids: torch.IntTensor | None = None,
610
+ past_key_values: Cache | None = None,
611
+ inputs_embeds: torch.FloatTensor | None = None,
612
+ use_cache: bool | None = None,
613
+ cache_position: torch.LongTensor | None = None,
614
+ **kwargs: Unpack[FlashAttentionKwargs],
615
+ ) -> MoeModelOutputWithPast:
616
+ r"""
617
+ moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
618
+ The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
619
+ """
620
+ if (input_ids is None) ^ (inputs_embeds is not None):
621
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
622
+
623
+ if use_cache and past_key_values is None:
624
+ past_key_values = DynamicCache(config=self.config)
625
+
626
+ if inputs_embeds is None:
627
+ inputs_embeds = self.embed_tokens(input_ids)
628
+
629
+ if cache_position is None:
630
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
631
+ cache_position = torch.arange(
632
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
633
+ )
634
+
635
+ # the hard coded `3` is for temporal, height and width.
636
+ if position_ids is None:
637
+ position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
638
+ elif position_ids.ndim == 2:
639
+ position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
640
+
641
+ # NOTE: we need to pass text position ids for packing. Ernie 4.5 VL uses 3D positions
642
+ # where each dim indicates visual spatial positions for temporal/height/width grids.
643
+ # There is only one scenario when FA2-like packed masking might be activated:
644
+ # 1. User specifically passed packed `position_ids` and no attention mask.
645
+ # In this case we expect the user to create correct position ids for all 3 grids
646
+ # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
647
+ if position_ids.ndim == 3 and position_ids.shape[0] == 4:
648
+ text_position_ids = position_ids[0]
649
+ position_ids = position_ids[1:]
650
+ else:
651
+ # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
652
+ text_position_ids = None
653
+
654
+ attention_mask = create_causal_mask(
655
+ config=self.config,
656
+ input_embeds=inputs_embeds,
657
+ attention_mask=attention_mask,
658
+ cache_position=cache_position,
659
+ past_key_values=past_key_values,
660
+ position_ids=text_position_ids,
661
+ )
662
+
663
+ hidden_states = inputs_embeds
664
+
665
+ # create position embeddings to be shared across the decoder layers
666
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
667
+
668
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
669
+ hidden_states = decoder_layer(
670
+ hidden_states,
671
+ position_embeddings=position_embeddings,
672
+ attention_mask=attention_mask,
673
+ position_ids=position_ids,
674
+ moe_mm_token_type_ids=moe_mm_token_type_ids,
675
+ past_key_values=past_key_values,
676
+ cache_position=cache_position,
677
+ **kwargs,
678
+ )
679
+
680
+ hidden_states = self.norm(hidden_states)
681
+
682
+ return MoeModelOutputWithPast(
683
+ last_hidden_state=hidden_states,
684
+ past_key_values=past_key_values,
685
+ )
686
+
687
+
688
+ class Ernie4_5VLVisionMLP(nn.Module):
689
+ def __init__(self, dim: int, hidden_dim: int, hidden_act: str) -> None:
690
+ super().__init__()
691
+ self.fc1 = nn.Linear(dim, hidden_dim)
692
+ self.act = ACT2FN[hidden_act]
693
+ self.fc2 = nn.Linear(hidden_dim, dim)
694
+
695
+ def forward(self, x) -> torch.Tensor:
696
+ return self.fc2(self.act(self.fc1(x)))
697
+
698
+
699
+ class Ernie4_5_VL_MoePatchEmbed(nn.Module):
700
+ def __init__(
701
+ self,
702
+ patch_size: int = 14,
703
+ in_channels: int = 3,
704
+ embed_dim: int = 1152,
705
+ ) -> None:
706
+ super().__init__()
707
+ self.patch_size = patch_size
708
+ self.in_channels = in_channels
709
+ self.embed_dim = embed_dim
710
+ self.proj = nn.Linear(in_channels * patch_size * patch_size, embed_dim, bias=False)
711
+
712
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
713
+ target_dtype = self.proj.weight.dtype
714
+ return self.proj(hidden_states.to(target_dtype))
715
+
716
+
717
+ class Ernie4_5_VL_MoeVisionRotaryEmbedding(nn.Module):
718
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
719
+
720
+ def __init__(self, dim: int, theta: float = 10000.0) -> None:
721
+ super().__init__()
722
+ self.dim = dim
723
+ self.theta = theta
724
+ inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
725
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
726
+
727
+ def forward(self, seqlen: int) -> torch.Tensor:
728
+ seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
729
+ freqs = torch.outer(seq, self.inv_freq)
730
+ return freqs
731
+
732
+
733
+ def rotate_half(x):
734
+ """Rotates half the hidden dims of the input."""
735
+ x1 = x[..., : x.shape[-1] // 2]
736
+ x2 = x[..., x.shape[-1] // 2 :]
737
+ return torch.cat((-x2, x1), dim=-1)
738
+
739
+
740
+ def apply_rotary_pos_emb_vision(
741
+ q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
742
+ ) -> tuple[torch.Tensor, torch.Tensor]:
743
+ orig_q_dtype = q.dtype
744
+ orig_k_dtype = k.dtype
745
+ q, k = q.float(), k.float()
746
+ cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
747
+ q_embed = (q * cos) + (rotate_half(q) * sin)
748
+ k_embed = (k * cos) + (rotate_half(k) * sin)
749
+ q_embed = q_embed.to(orig_q_dtype)
750
+ k_embed = k_embed.to(orig_k_dtype)
751
+ return q_embed, k_embed
752
+
753
+
754
+ class Ernie4_5_VL_MoeVisionAttention(nn.Module):
755
+ def __init__(self, config: Ernie4_5_VL_MoeVisionConfig) -> None:
756
+ super().__init__()
757
+ self.dim = config.hidden_size
758
+ self.num_heads = config.num_heads
759
+ self.head_dim = self.dim // self.num_heads
760
+ self.num_key_value_groups = 1 # needed for eager attention
761
+ self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True)
762
+ self.proj = nn.Linear(self.dim, self.dim)
763
+ self.scaling = self.head_dim**-0.5
764
+ self.config = config
765
+ self.attention_dropout = 0.0
766
+ self.is_causal = False
767
+
768
+ def forward(
769
+ self,
770
+ hidden_states: torch.Tensor,
771
+ cu_seqlens: torch.Tensor,
772
+ rotary_pos_emb: torch.Tensor | None = None,
773
+ position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
774
+ **kwargs,
775
+ ) -> torch.Tensor:
776
+ seq_length = hidden_states.shape[0]
777
+ query_states, key_states, value_states = (
778
+ self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
779
+ )
780
+ cos, sin = position_embeddings
781
+ query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
782
+
783
+ query_states = query_states.transpose(0, 1).unsqueeze(0)
784
+ key_states = key_states.transpose(0, 1).unsqueeze(0)
785
+ value_states = value_states.transpose(0, 1).unsqueeze(0)
786
+
787
+ attention_interface: Callable = eager_attention_forward
788
+ if self.config._attn_implementation != "eager":
789
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
790
+
791
+ if "flash" in self.config._attn_implementation:
792
+ # Flash Attention: Use cu_seqlens for variable length attention
793
+ max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
794
+ attn_output, _ = attention_interface(
795
+ self,
796
+ query_states,
797
+ key_states,
798
+ value_states,
799
+ attention_mask=None,
800
+ scaling=self.scaling,
801
+ dropout=0.0 if not self.training else self.attention_dropout,
802
+ cu_seq_lens_q=cu_seqlens,
803
+ cu_seq_lens_k=cu_seqlens,
804
+ max_length_q=max_seqlen,
805
+ max_length_k=max_seqlen,
806
+ is_causal=False,
807
+ **kwargs,
808
+ )
809
+ else:
810
+ # Other implementations: Process each chunk separately
811
+ lengths = cu_seqlens[1:] - cu_seqlens[:-1]
812
+ splits = [
813
+ torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
814
+ ]
815
+
816
+ attn_outputs = [
817
+ attention_interface(
818
+ self,
819
+ q,
820
+ k,
821
+ v,
822
+ attention_mask=None,
823
+ scaling=self.scaling,
824
+ dropout=0.0 if not self.training else self.attention_dropout,
825
+ is_causal=False,
826
+ **kwargs,
827
+ )[0]
828
+ for q, k, v in zip(*splits)
829
+ ]
830
+ attn_output = torch.cat(attn_outputs, dim=1)
831
+
832
+ attn_output = attn_output.reshape(seq_length, -1).contiguous()
833
+ attn_output = self.proj(attn_output)
834
+ return attn_output
835
+
836
+
837
+ class Ernie4_5_VL_MoeVisionBlock(GradientCheckpointingLayer):
838
+ def __init__(self, config) -> None:
839
+ super().__init__()
840
+
841
+ self.norm1 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps)
842
+ self.norm2 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps)
843
+ self.attn = Ernie4_5_VL_MoeVisionAttention(config=config)
844
+ self.mlp = Ernie4_5VLVisionMLP(
845
+ dim=config.hidden_size,
846
+ hidden_dim=config.intermediate_size,
847
+ hidden_act=config.hidden_act,
848
+ )
849
+
850
+ def forward(
851
+ self,
852
+ hidden_states: torch.Tensor,
853
+ cu_seqlens: torch.Tensor,
854
+ rotary_pos_emb: torch.Tensor | None = None,
855
+ position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
856
+ **kwargs,
857
+ ) -> torch.Tensor:
858
+ hidden_states = hidden_states + self.attn(
859
+ self.norm1(hidden_states),
860
+ cu_seqlens=cu_seqlens,
861
+ rotary_pos_emb=rotary_pos_emb,
862
+ position_embeddings=position_embeddings,
863
+ **kwargs,
864
+ )
865
+ hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
866
+ return hidden_states
867
+
868
+
869
+ @auto_docstring
870
+ class Ernie4_5_VL_MoeVisionTransformerPretrainedModel(Ernie4_5_VL_MoePreTrainedModel):
871
+ config: Ernie4_5_VL_MoeVisionConfig
872
+ input_modalities = ("image", "video")
873
+ _no_split_modules = ["Ernie4_5_VL_MoeVisionBlock"]
874
+ _input_embed_layer = "patch_embed"
875
+
876
+ def __init__(self, config) -> None:
877
+ super().__init__(config)
878
+ self.spatial_merge_size = config.spatial_merge_size
879
+
880
+ self.patch_embed = Ernie4_5_VL_MoePatchEmbed(
881
+ patch_size=config.patch_size,
882
+ in_channels=config.in_channels,
883
+ embed_dim=config.hidden_size,
884
+ )
885
+
886
+ head_dim = config.hidden_size // config.num_heads
887
+ self.rotary_pos_emb = Ernie4_5_VL_MoeVisionRotaryEmbedding(head_dim // 2)
888
+
889
+ self.blocks = nn.ModuleList([Ernie4_5_VL_MoeVisionBlock(config) for _ in range(config.depth)])
890
+ self.gradient_checkpointing = False
891
+
892
+ self.ln = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps)
893
+
894
+ self.post_init()
895
+
896
+ def rot_pos_emb(self, grid_thw):
897
+ pos_ids = []
898
+ for t, h, w in grid_thw:
899
+ hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
900
+ hpos_ids = hpos_ids.reshape(
901
+ h // self.spatial_merge_size,
902
+ self.spatial_merge_size,
903
+ w // self.spatial_merge_size,
904
+ self.spatial_merge_size,
905
+ )
906
+ hpos_ids = hpos_ids.permute(0, 2, 1, 3)
907
+ hpos_ids = hpos_ids.flatten()
908
+
909
+ wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
910
+ wpos_ids = wpos_ids.reshape(
911
+ h // self.spatial_merge_size,
912
+ self.spatial_merge_size,
913
+ w // self.spatial_merge_size,
914
+ self.spatial_merge_size,
915
+ )
916
+ wpos_ids = wpos_ids.permute(0, 2, 1, 3)
917
+ wpos_ids = wpos_ids.flatten()
918
+ pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
919
+ pos_ids = torch.cat(pos_ids, dim=0)
920
+ max_grid_size = grid_thw[:, 1:].max()
921
+ rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
922
+ rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
923
+ return rotary_pos_emb
924
+
925
+ @auto_docstring
926
+ def forward(
927
+ self,
928
+ hidden_states: torch.Tensor,
929
+ grid_thw: torch.Tensor,
930
+ **kwargs,
931
+ ) -> torch.Tensor:
932
+ r"""
933
+ grid_thw (`torch.LongTensor` of shape `(num_images, 3)`):
934
+ The temporal, height and width dimensions of feature shape for each image. Each row contains [t, h, w] values.
935
+ """
936
+ hidden_states = self.patch_embed(hidden_states)
937
+ rotary_pos_emb = self.rot_pos_emb(grid_thw)
938
+ emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
939
+ position_embeddings = (emb.cos(), emb.sin())
940
+
941
+ cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
942
+ dim=0,
943
+ # Select dtype based on the following factors:
944
+ # - FA2 requires that cu_seqlens_q must have dtype int32
945
+ # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
946
+ # See https://github.com/huggingface/transformers/pull/34852 for more information
947
+ dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
948
+ )
949
+ cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
950
+
951
+ for block in self.blocks:
952
+ hidden_states = block(
953
+ hidden_states,
954
+ cu_seqlens=cu_seqlens,
955
+ position_embeddings=position_embeddings,
956
+ **kwargs,
957
+ )
958
+ hidden_states = self.ln(hidden_states)
959
+ return hidden_states
960
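# cu_seqlens sketch (assumption): cumulative patch counts per frame for two inputs,
# an image with grid (1, 4, 4) and a video with grid (2, 2, 2), padded with a leading
# zero as expected by variable-length attention kernels.
import torch
import torch.nn.functional as F

grid_thw = torch.tensor([[1, 4, 4], [2, 2, 2]])
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(dim=0, dtype=torch.int32)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
# tensor([ 0, 16, 20, 24], dtype=torch.int32)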
+
961
+
962
+ class Ernie4_5_VL_MoeVisionMLP(nn.Module):
963
+ def __init__(self, config, in_dim, out_dim):
964
+ super().__init__()
965
+
966
+ self.fc1 = nn.Linear(in_dim, out_dim)
967
+ self.act_fn = nn.GELU()
968
+ self.fc2 = nn.Linear(out_dim, out_dim)
969
+ self.ln = nn.LayerNorm(out_dim, eps=config.vision_config.rms_norm_eps)
970
+
971
+ def forward(self, hidden_states):
972
+ hidden_states = self.fc1(hidden_states)
973
+ hidden_states = self.act_fn(hidden_states)
974
+ hidden_states = self.fc2(hidden_states)
975
+ hidden_states = self.ln(hidden_states)
976
+ return hidden_states
977
+
978
+
979
+ class Ernie4_5_VL_MoeVariableResolutionResamplerModel(nn.Module):
980
+ def __init__(self, config: Ernie4_5_VL_MoeConfig):
981
+ super().__init__()
982
+ self.config = config
983
+
984
+ self.in_dim = config.vision_config.hidden_size
985
+ self.out_dim = config.text_config.hidden_size
986
+ self.spatial_merge_size = config.vision_config.spatial_merge_size
987
+ self.temporal_merge_size = config.vision_config.temporal_merge_size
988
+
989
+ # compress 2d conv(picture) to 1d
990
+ self.spatial_dim = self.in_dim * self.spatial_merge_size**2
991
+ # compress 3d conv(video) to 1d
992
+ self.temporal_dim = self.in_dim * self.spatial_merge_size**2 * self.temporal_merge_size
993
+
994
+ self.spatial_linear = Ernie4_5_VL_MoeVisionMLP(config, self.spatial_dim, self.spatial_dim)
995
+ self.temporal_linear = Ernie4_5_VL_MoeVisionMLP(config, self.temporal_dim, self.spatial_dim)
996
+
997
+ self.mlp = nn.Linear(self.spatial_dim, self.out_dim)
998
+ self.after_norm = Ernie4_5_VL_MoeRMSNorm(self.out_dim, config.text_config.rms_norm_eps)
999
+
1000
+ def _temporal_slicing(self, hidden_states, grid_thw):
1001
+ """
1002
+ Slices along the temporal dimension in even/odd patterns (usually if we have a video input)
1003
+ or duplicates along the temporal dimension (usually if we have an image input).
1004
+
1005
+ Example:
1006
+ Video input with temporal pattern of [1, -1, 2, -2, 3, -3]
1007
+ > Even input [1, 2, 3], odd input [-1, -2, -3]
1008
+ > Reordered via slices to [1, 2, 3, -1, -2, -3]
1009
+ Image input with temporal pattern [1]
1010
+ > Duplicate input [1], [1]
1011
+ > Reordered to [1, 1]
1012
+
1013
+ NOTE: This is hard-coded for `temporal_merge_size == 2` and won't work otherwise.
1014
+ """
1015
+ # Calculating offsets on spatial dim (based on flattened tensors)
1016
+ grid_t, grid_hw = grid_thw[:, 0], grid_thw[:, 1:]
1017
+ grid_hw_after_conv = grid_hw.prod(-1) // (self.spatial_merge_size**2)
1018
+
1019
+ # Calculating offsets on batch dim (based on flattened tensors)
1020
+ tokens_per_img_or_vid = (grid_thw.prod(-1) // (self.spatial_merge_size**2)).flatten()
1021
+ batch_offsets = torch.empty(tokens_per_img_or_vid.size(), dtype=tokens_per_img_or_vid.dtype)
1022
+ batch_offsets[0] = 0
1023
+ batch_offsets[1:] = tokens_per_img_or_vid.cumsum(dim=0)[:-1]
1024
+
1025
+ first_slice_offsets = []
1026
+ second_slice_offsets = []
1027
+ for temporal_size, spatial_size, batch_offset in zip(grid_t, grid_hw_after_conv, batch_offsets):
1028
+ # Depending on the temporal size, we may interleave:
1029
+ # - Images have temporal == 1 --> same offsets (duplicate "frame" image)
1030
+ # - Videos have temporal > 1 --> different offsets (even, odd)
1031
+ first_offset_range = range(0, temporal_size, 2)
1032
+ second_offset_range = range(1 if temporal_size > 1 else 0, temporal_size, 2)
1033
+
1034
+ for temporal_offset_even, temporal_offset_odd in zip(first_offset_range, second_offset_range):
1035
+ first_slice_offsets.append(
1036
+ torch.arange(
1037
+ batch_offset + (temporal_offset_even) * spatial_size,
1038
+ batch_offset + (temporal_offset_even + 1) * spatial_size,
1039
+ )
1040
+ )
1041
+ second_slice_offsets.append(
1042
+ torch.arange(
1043
+ batch_offset + (temporal_offset_odd) * spatial_size,
1044
+ batch_offset + (temporal_offset_odd + 1) * spatial_size,
1045
+ )
1046
+ )
1047
+
1048
+ # Input: [1, -1, 2, -2, 3, -3] or [1]
1049
+ # Indices: [0, 2, 4] (even) or [0] (duplicate)
1050
+ first_slice_offsets = torch.cat(first_slice_offsets, dim=-1).to(hidden_states.device)
1051
+ # Indices: [1, 3, 5] (odd) or [0] (duplicate)
1052
+ second_slice_offsets = torch.cat(second_slice_offsets, dim=-1).to(hidden_states.device)
1053
+
1054
+ # Output: [1, 2, 3, -1, -2, -3] or [1, 1]
1055
+ return torch.concat(
1056
+ [
1057
+ torch.index_select(hidden_states, dim=0, index=first_slice_offsets),
1058
+ torch.index_select(hidden_states, dim=0, index=second_slice_offsets),
1059
+ ],
1060
+ dim=-1,
1061
+ )
1062
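# Slicing sketch (assumption): with temporal_merge_size == 2, tokens of frame 0 and
# frame 1 that share a spatial location end up concatenated on the feature dimension,
# while an image (temporal size 1) would simply duplicate its tokens.
import torch

tokens_per_frame = 3
features = torch.arange(6, dtype=torch.float).reshape(6, 1)  # 2 frames x 3 tokens, 1 feature
frame0 = features[:tokens_per_frame]
frame1 = features[tokens_per_frame:]
merged = torch.cat([frame0, frame1], dim=-1)
# tensor([[0., 3.], [1., 4.], [2., 5.]])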
+
1063
+ def forward(self, hidden_states, grid_thw):
1064
+ # image spatial
1065
+ # reshape imitates convolution via linear projection
1066
+ hidden_states = hidden_states.reshape([-1, hidden_states.shape[-1] * (self.spatial_merge_size**2)])
1067
+ hidden_states = self.spatial_linear(hidden_states)
1068
+
1069
+ # video temporal
1070
+ hidden_states = self._temporal_slicing(hidden_states, grid_thw)
1071
+ hidden_states = self.temporal_linear(hidden_states)
1072
+
1073
+ # final mlp
1074
+ hidden_states = self.mlp(hidden_states)
1075
+ hidden_states = self.after_norm(hidden_states)
1076
+
1077
+ return hidden_states
1078
+
1079
+
1080
+ @auto_docstring
1081
+ class Ernie4_5_VL_MoeModel(Ernie4_5_VL_MoePreTrainedModel):
1082
+ base_model_prefix = "model"
1083
+ _checkpoint_conversion_mapping = {"^norm": "language_model.norm"}
1084
+ # Reference: fix gemma3 grad acc #37208
1085
+ accepts_loss_kwargs = False
1086
+ config: Ernie4_5_VL_MoeConfig
1087
+ _no_split_modules = ["Ernie4_5_VL_MoeDecoderLayer", "Ernie4_5_VL_MoeVisionBlock"]
1088
+
1089
+ def __init__(self, config: Ernie4_5_VL_MoeConfig):
1090
+ super().__init__(config)
1091
+ self.language_model = Ernie4_5_VL_MoeTextModel._from_config(config.text_config)
1092
+ self.rope_deltas = None # cache rope_deltas here
1093
+ self.vision_tower = Ernie4_5_VL_MoeVisionTransformerPretrainedModel._from_config(config.vision_config)
1094
+ self.resampler_model = Ernie4_5_VL_MoeVariableResolutionResamplerModel(config)
1095
+
1096
+ # Initialize weights and apply final processing
1097
+ self.post_init()
1098
+
1099
+ def get_input_embeddings(self):
1100
+ return self.language_model.get_input_embeddings()
1101
+
1102
+ def set_input_embeddings(self, value):
1103
+ self.language_model.set_input_embeddings(value)
1104
+
1105
+ def get_rope_index(
1106
+ self,
1107
+ input_ids: torch.LongTensor | None = None,
1108
+ image_grid_thw: torch.LongTensor | None = None,
1109
+ video_grid_thw: torch.LongTensor | None = None,
1110
+ attention_mask: torch.Tensor | None = None,
1111
+ mm_token_type_ids: torch.IntTensor | None = None,
1112
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1113
+ """
1114
+ Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
1115
+
1116
+ Explanation:
1117
+ Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
1118
+
1119
+ For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
1120
+ Examples:
1121
+ input_ids: [T T T T T], here T is for text.
1122
+ temporal position_ids: [0, 1, 2, 3, 4]
1123
+ height position_ids: [0, 1, 2, 3, 4]
1124
+ width position_ids: [0, 1, 2, 3, 4]
1125
+
1126
+ For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
1127
+ and 1D rotary position embedding for text part.
1128
+ Examples:
1129
+ Temporal (Time): 3 patches, representing different segments of the video in time.
1130
+ Height: 2 patches, dividing each frame vertically.
1131
+ Width: 2 patches, dividing each frame horizontally.
1132
+ We also have some important parameters:
1133
+ fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
1134
+ tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity.
1135
+ temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
1136
+ interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will be have a difference of 50 in the temporal position IDs.
1137
+ input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
1138
+ vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
1139
+ vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
1140
+ vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
1141
+ text temporal position_ids: [101, 102, 103, 104, 105]
1142
+ text height position_ids: [101, 102, 103, 104, 105]
1143
+ text width position_ids: [101, 102, 103, 104, 105]
1144
+ Here we calculate the text start position_ids as the max vision position_ids plus 1.
1145
+
1146
+ Args:
1147
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1148
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1149
+ it.
1150
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1151
+ The temporal, height and width of feature shape of each image in LLM.
1152
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1153
+ The temporal, height and width of feature shape of each video in LLM.
1154
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1155
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1156
+
1157
+ - 1 for tokens that are **not masked**,
1158
+ - 0 for tokens that are **masked**.
1159
+ mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
1160
+ Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
1161
+
1162
+ Returns:
1163
+ position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
1164
+ mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
1165
+ """
1166
+
1167
+ temporal_merge_size = self.config.vision_config.temporal_merge_size
1168
+ spatial_merge_size = self.config.vision_config.spatial_merge_size
1169
+
1170
+ mrope_position_deltas = []
1171
+ if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
1172
+ total_input_ids = input_ids
1173
+ if attention_mask is None:
1174
+ attention_mask = torch.ones_like(total_input_ids)
1175
+ position_ids = torch.ones(
1176
+ 3,
1177
+ input_ids.shape[0],
1178
+ input_ids.shape[1],
1179
+ dtype=input_ids.dtype,
1180
+ device=input_ids.device,
1181
+ )
1182
+ image_index, video_index = 0, 0
1183
+ attention_mask = attention_mask.to(total_input_ids.device)
1184
+ for i, input_ids in enumerate(total_input_ids):
1185
+ # If we don't have `mm_token_type_ids`, then we have text tokens only (== 0)
1186
+ if mm_token_type_ids is None:
1187
+ input_token_type = torch.zeros_like(input_ids)[attention_mask[i] == 1].tolist()
1188
+ else:
1189
+ input_token_type = mm_token_type_ids[i, attention_mask[i] == 1].tolist()
1190
+
1191
+ input_type_group = []
1192
+ for key, group in itertools.groupby(enumerate(input_token_type), lambda x: x[1]):
1193
+ group = list(group)
1194
+ start_index = group[0][0]
1195
+ end_index = group[-1][0] + 1
1196
+ input_type_group.append((key, start_index, end_index))
1197
+
1198
+ llm_pos_ids_list = []
1199
+ for modality_type, start_idx, end_idx in input_type_group:
1200
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
1201
+
1202
+ # text == 0
1203
+ if modality_type == 0:
1204
+ text_len = end_idx - start_idx
1205
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
1206
+
1207
+ # image == 1, video == 2
1208
+ else:
1209
+ grid_thw = image_grid_thw if modality_type == 1 else video_grid_thw
1210
+ mm_index = image_index if modality_type == 1 else video_index
1211
+ t_merge_size = 1 if modality_type == 1 else temporal_merge_size
1212
+
1213
+ t, h, w = (
1214
+ grid_thw[mm_index][0],
1215
+ grid_thw[mm_index][1],
1216
+ grid_thw[mm_index][2],
1217
+ )
1218
+ llm_grid_t, llm_grid_h, llm_grid_w = (
1219
+ t.item() // t_merge_size,
1220
+ h.item() // spatial_merge_size,
1221
+ w.item() // spatial_merge_size,
1222
+ )
1223
+
1224
+ for t_idx in range(llm_grid_t):
1225
+ t_index = torch.tensor(t_idx).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
1226
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(1, -1, llm_grid_w).flatten()
1227
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(1, llm_grid_h, -1).flatten()
1228
+ llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx)
1229
+
1230
+ if modality_type == 1:
1231
+ image_index += 1
1232
+ else:
1233
+ video_index += 1
1234
+
1235
+ llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
1236
+ position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device)
1237
+ mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i]))
1238
+ mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
1239
+ return position_ids, mrope_position_deltas
1240
+ else:
1241
+ if attention_mask is not None:
1242
+ position_ids = attention_mask.long().cumsum(-1) - 1
1243
+ position_ids.masked_fill_(attention_mask == 0, 1)
1244
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
1245
+ max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
1246
+ mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
1247
+ else:
1248
+ position_ids = (
1249
+ torch.arange(input_ids.shape[1], device=input_ids.device)
1250
+ .view(1, 1, -1)
1251
+ .expand(3, input_ids.shape[0], -1)
1252
+ )
1253
+ mrope_position_deltas = torch.zeros(
1254
+ [input_ids.shape[0], 1],
1255
+ device=input_ids.device,
1256
+ dtype=input_ids.dtype,
1257
+ )
1258
+
1259
+ return position_ids, mrope_position_deltas
1260
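# Position-id sketch (assumption): 3D positions for one image occupying a 1 x 2 x 2
# patch grid after merging, followed by text tokens whose positions continue from the
# maximum vision position plus one (as described in the docstring above).
import torch

llm_grid_t, llm_grid_h, llm_grid_w = 1, 2, 2
t_index = torch.zeros(llm_grid_h * llm_grid_w, dtype=torch.long)
h_index = torch.arange(llm_grid_h).repeat_interleave(llm_grid_w)
w_index = torch.arange(llm_grid_w).repeat(llm_grid_h)
vision_pos = torch.stack([t_index, h_index, w_index])              # (3, 4)
text_pos = (torch.arange(3) + vision_pos.max() + 1).expand(3, -1)  # three trailing text tokens
# vision_pos: [[0,0,0,0], [0,0,1,1], [0,1,0,1]]; text_pos: [[2,3,4]] x 3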
+
1261
+ def get_video_features(
1262
+ self, pixel_values_videos: torch.FloatTensor, video_grid_thw: torch.LongTensor | None = None
1263
+ ):
1264
+ """
1265
+ Encodes videos into continuous embeddings that can be forwarded to the language model.
1266
+
1267
+ Args:
1268
+ pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1269
+ The tensors corresponding to the input videos.
1270
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1271
+ The temporal, height and width of feature shape of each video in LLM.
1272
+ """
1273
+ video_embeds = self.vision_tower(pixel_values_videos, video_grid_thw)
1274
+ video_embeds = self.resampler_model(video_embeds, video_grid_thw)
1275
+ split_sizes = (
1276
+ video_grid_thw.prod(-1)
1277
+ // self.vision_tower.spatial_merge_size**2
1278
+ // self.resampler_model.temporal_merge_size
1279
+ ).tolist()
1280
+ video_embeds = torch.split(video_embeds, split_sizes)
1281
+ return video_embeds
1282
+
1283
+ def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: torch.LongTensor | None = None):
1284
+ """
1285
+ Encodes images into continuous embeddings that can be forwarded to the language model.
1286
+
1287
+ Args:
1288
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1289
+ The tensors corresponding to the input images.
1290
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1291
+ The temporal, height and width of feature shape of each image in LLM.
1292
+ """
1293
+ image_embeds = self.vision_tower(pixel_values, image_grid_thw)
1294
+ image_embeds = self.resampler_model(image_embeds, image_grid_thw)
1295
+ split_sizes = (image_grid_thw.prod(-1) // self.vision_tower.spatial_merge_size**2).tolist()
1296
+ image_embeds = torch.split(image_embeds, split_sizes)
1297
+ return image_embeds
1298
+
1299
+ def get_placeholder_mask(
1300
+ self,
1301
+ input_ids: torch.LongTensor,
1302
+ inputs_embeds: torch.FloatTensor,
1303
+ image_features: torch.FloatTensor | None = None,
1304
+ video_features: torch.FloatTensor | None = None,
1305
+ ):
1306
+ """
1307
+ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
1308
+ equal to the length of multimodal features. If the lengths are different, an error is raised.
1309
+ """
1310
+ if input_ids is None:
1311
+ special_image_mask = inputs_embeds == self.get_input_embeddings()(
1312
+ torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
1313
+ )
1314
+ special_image_mask = special_image_mask.all(-1)
1315
+ special_video_mask = inputs_embeds == self.get_input_embeddings()(
1316
+ torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
1317
+ )
1318
+ special_video_mask = special_video_mask.all(-1)
1319
+ else:
1320
+ special_image_mask = input_ids == self.config.image_token_id
1321
+ special_video_mask = input_ids == self.config.video_token_id
1322
+
1323
+ n_image_tokens = special_image_mask.sum()
1324
+ special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
1325
+ if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
1326
+ raise ValueError(
1327
+ f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
1328
+ )
1329
+
1330
+ n_video_tokens = special_video_mask.sum()
1331
+ special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
1332
+ if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
1333
+ raise ValueError(
1334
+ f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
1335
+ )
1336
+
1337
+ return special_image_mask, special_video_mask
1338
+
1339
+ @auto_docstring
1340
+ @can_return_tuple
1341
+ def forward(
1342
+ self,
1343
+ input_ids: torch.LongTensor = None,
1344
+ attention_mask: torch.Tensor | None = None,
1345
+ position_ids: torch.LongTensor | None = None,
1346
+ mm_token_type_ids: torch.IntTensor | None = None,
1347
+ moe_mm_token_type_ids: torch.IntTensor | None = None,
1348
+ past_key_values: Cache | None = None,
1349
+ inputs_embeds: torch.FloatTensor | None = None,
1350
+ use_cache: bool | None = None,
1351
+ pixel_values: torch.Tensor | None = None,
1352
+ pixel_values_videos: torch.FloatTensor | None = None,
1353
+ image_grid_thw: torch.LongTensor | None = None,
1354
+ video_grid_thw: torch.LongTensor | None = None,
1355
+ rope_deltas: torch.LongTensor | None = None,
1356
+ cache_position: torch.LongTensor | None = None,
1357
+ **kwargs: Unpack[TransformersKwargs],
1358
+ ) -> tuple | MoeModelOutputWithPast:
1359
+ r"""
1360
+ mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
1361
+ Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
1362
+ moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
1363
+ The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
1364
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1365
+ The temporal, height and width of feature shape of each image in LLM.
1366
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1367
+ The temporal, height and width of feature shape of each video in LLM.
1368
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
1369
+ The rope index difference between sequence length and multimodal rope.
1370
+ """
1371
+ if inputs_embeds is None:
1372
+ inputs_embeds = self.get_input_embeddings()(input_ids)
1373
+
1374
+ if pixel_values is not None:
1375
+ image_embeds = self.get_image_features(pixel_values, image_grid_thw)
1376
+ image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
1377
+ image_mask, _ = self.get_placeholder_mask(
1378
+ input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
1379
+ )
1380
+ inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
1381
+
1382
+ if pixel_values_videos is not None:
1383
+ video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
1384
+ video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
1385
+ _, video_mask = self.get_placeholder_mask(
1386
+ input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
1387
+ )
1388
+ inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
1389
+
1390
+ if position_ids is None:
1391
+ position_ids = self.get_position_ids(
1392
+ input_ids=input_ids,
1393
+ attention_mask=attention_mask,
1394
+ past_key_values=past_key_values,
1395
+ inputs_embeds=inputs_embeds,
1396
+ image_grid_thw=image_grid_thw,
1397
+ video_grid_thw=video_grid_thw,
1398
+ cache_position=cache_position,
1399
+ mm_token_type_ids=mm_token_type_ids,
1400
+ )
1401
+
1402
+ outputs = self.language_model(
1403
+ input_ids=None,
1404
+ position_ids=position_ids,
1405
+ moe_mm_token_type_ids=moe_mm_token_type_ids,
1406
+ attention_mask=attention_mask,
1407
+ use_cache=use_cache,
1408
+ past_key_values=past_key_values,
1409
+ inputs_embeds=inputs_embeds,
1410
+ return_dict=True,
1411
+ cache_position=cache_position,
1412
+ **kwargs,
1413
+ )
1414
+
1415
+ return MoeModelOutputWithPast(
1416
+ last_hidden_state=outputs.last_hidden_state,
1417
+ past_key_values=outputs.past_key_values,
1418
+ hidden_states=outputs.hidden_states,
1419
+ attentions=outputs.attentions,
1420
+ router_logits=outputs.router_logits,
1421
+ )
1422
+
1423
+ # TODO: Should be moved to generation loop instead in the future
1424
+ # Relevant PR(s): https://github.com/huggingface/transformers/pull/42088
1425
+ def get_position_ids(
1426
+ self,
1427
+ input_ids: torch.LongTensor = None,
1428
+ attention_mask: torch.Tensor | None = None,
1429
+ past_key_values: Cache | None = None,
1430
+ inputs_embeds: torch.FloatTensor | None = None,
1431
+ image_grid_thw: torch.LongTensor | None = None,
1432
+ video_grid_thw: torch.LongTensor | None = None,
1433
+ cache_position: torch.LongTensor | None = None,
1434
+ mm_token_type_ids: torch.IntTensor | None = None,
1435
+ ):
1436
+ """
1437
+ Calculating the 3D position ids with a custom mechanism / caching
1438
+ - First forward calculates the initial positions and the respective
1439
+ deltas (offset) for subsequent positions. See `get_rope_index` for
1440
+ more details.
1441
+ - From the second forward on (generation), uses the cache position combined with the
1442
+ cached deltas to determine the current position.
1443
+
1444
+ NOTE: We assume that the position ids are `None` and recalculate them here in any case.
1445
+ """
1446
+ # Calculate RoPE index once per generation in the pre-fill stage only.
1447
+ # When compiling, we can't check tensor values, thus we only check the input length
1448
+ # It is safe to assume that `length!=1` means we're in pre-fill because compiled
1449
+ # models currently cannot do assisted decoding
1450
+ prefill_compiled_stage = is_torchdynamo_compiling() and (
1451
+ (input_ids is not None and input_ids.shape[1] != 1)
1452
+ or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
1453
+ )
1454
+ prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
1455
+ (cache_position is not None and cache_position[0] == 0)
1456
+ or (past_key_values is None or past_key_values.get_seq_length() == 0)
1457
+ )
1458
+ if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None:
1459
+ position_ids, rope_deltas = self.get_rope_index(
1460
+ input_ids,
1461
+ image_grid_thw,
1462
+ video_grid_thw,
1463
+ attention_mask=attention_mask,
1464
+ mm_token_type_ids=mm_token_type_ids,
1465
+ )
1466
+ self.rope_deltas = rope_deltas
1467
+ # then use the prev pre-calculated rope-deltas to get the correct position ids
1468
+ else:
1469
+ if input_ids is not None:
1470
+ batch_size, seq_length, device = input_ids.shape[0], 1, input_ids.device
1471
+ elif inputs_embeds is not None:
1472
+ batch_size, seq_length, device = inputs_embeds.shape[0], 1, inputs_embeds.device
1473
+ else:
1474
+ raise ValueError(
1475
+ "Cannot calculate position ids without any input to the model. "
1476
+ "Need either `input_ids` or `inputs_embeds`!"
1477
+ )
1478
+
1479
+ delta = (cache_position[0] + self.rope_deltas).to(device) if cache_position is not None else 0
1480
+ position_ids = torch.arange(seq_length, device=device)
1481
+ position_ids = position_ids.view(1, -1).expand(batch_size, -1)
1482
+ if cache_position is not None: # otherwise `deltas` is an int `0`
1483
+ delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
1484
+ position_ids = position_ids.add(delta)
1485
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
1486
+
1487
+ return position_ids
1488
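During decoding, `get_position_ids` does not recompute the 3D rope index from the image/video grids; it adds the cached `rope_deltas` offset to the current cache position and broadcasts the result over the three (t/h/w) axes. A rough standalone sketch of that decode-step arithmetic, with made-up values standing in for what `get_rope_index` would have produced at pre-fill:

    import torch

    batch_size = 2
    cache_position = torch.tensor([37])      # index of the token currently being decoded
    rope_deltas = torch.tensor([[-5], [0]])  # cached per-sample offsets from pre-fill

    delta = cache_position[0] + rope_deltas                                     # (batch_size, 1)
    position_ids = torch.arange(1).view(1, -1).expand(batch_size, -1) + delta   # one new position per sample
    position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)                  # same index for t/h/w
    print(position_ids.shape)  # torch.Size([3, 2, 1])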
+
+
+ def load_balancing_loss_func(
+     gate_logits: torch.Tensor | tuple[torch.Tensor] | None,
+     num_experts: int | None = None,
+     top_k=2,
+     attention_mask: torch.Tensor | None = None,
+ ) -> torch.Tensor | int:
+     r"""
+     Computes the auxiliary load balancing loss as in Switch Transformer - implemented in PyTorch.
+
+     See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the
+     loss presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+     experts is too unbalanced.
+
+     Args:
+         gate_logits:
+             Logits from the `gate`, should be a tuple of `model.config.num_hidden_layers` tensors of
+             shape `[batch_size X sequence_length, num_experts]`.
+         num_experts:
+             Number of experts.
+         top_k:
+             The number of experts each token is routed to; can also be interpreted as the `top-k` routing
+             parameter.
+         attention_mask (`torch.Tensor`, *optional*):
+             The attention_mask used in the forward function, of shape `[batch_size X sequence_length]` if not None.
+
+     Returns:
+         The auxiliary loss.
+     """
+     if gate_logits is None or not isinstance(gate_logits, tuple):
+         return 0
+
+     if isinstance(gate_logits, tuple):
+         compute_device = gate_logits[0].device
+         concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
+
+     routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
+
+     _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
+
+     expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
+
+     if attention_mask is None:
+         # Compute the percentage of tokens routed to each expert
+         tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+
+         # Compute the average probability of routing to these experts
+         router_prob_per_expert = torch.mean(routing_weights, dim=0)
+     else:
+         batch_size, sequence_length = attention_mask.shape
+         num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
+
+         # Compute the mask that zeroes out all padding tokens, with the same shape as expert_mask
+         expert_attention_mask = (
+             attention_mask[None, :, :, None, None]
+             .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
+             .reshape(-1, top_k, num_experts)
+             .to(compute_device)
+         )
+
+         # Compute the percentage of tokens routed to each expert
+         tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
+             expert_attention_mask, dim=0
+         )
+
+         # Compute the mask that zeroes out all padding tokens, with the same shape as tokens_per_expert
+         router_per_expert_attention_mask = (
+             attention_mask[None, :, :, None]
+             .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
+             .reshape(-1, num_experts)
+             .to(compute_device)
+         )
+
+         # Compute the average probability of routing to these experts
+         router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
+             router_per_expert_attention_mask, dim=0
+         )
+
+     overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
+     return overall_loss * num_experts
+
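As a quick sanity check of the loss defined above: for `E` experts the returned value is `E * sum(f_e * P_e)`, which comes out close to `top_k` when routing is near-uniform and grows as routing collapses onto fewer experts. A small sketch calling the function on random logits (toy shapes only; in the model the tuple comes from `outputs.router_logits`):

    import torch

    num_layers, num_tokens, num_experts, top_k = 4, 128, 8, 2
    # one (num_tokens, num_experts) logits tensor per layer, as collected in `router_logits`
    gate_logits = tuple(torch.randn(num_tokens, num_experts) for _ in range(num_layers))

    aux = load_balancing_loss_func(gate_logits, num_experts=num_experts, top_k=top_k)
    print(aux)  # roughly `top_k` for near-uniform routing, larger when routing is imbalanced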
+
+ class Ernie4_5_VL_MoeForConditionalGeneration(Ernie4_5_VL_MoePreTrainedModel, GenerationMixin):
+     _checkpoint_conversion_mapping = {"^model.norm": "model.language_model.norm"}
+     _tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
+     # Reference: fix gemma3 grad acc #37208
+     accepts_loss_kwargs = False
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = Ernie4_5_VL_MoeModel(config)
+         self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
+
+         self.router_aux_loss_coef = config.text_config.router_aux_loss_coef
+         self.num_experts = config.text_config.moe_num_experts
+         self.num_experts_per_tok = config.text_config.moe_k
+
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.get_input_embeddings()
+
+     def set_input_embeddings(self, value):
+         self.model.set_input_embeddings(value)
+
+     def get_video_features(
+         self, pixel_values_videos: torch.FloatTensor, video_grid_thw: torch.LongTensor | None = None
+     ):
+         return self.model.get_video_features(pixel_values_videos, video_grid_thw)
+
+     def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: torch.LongTensor | None = None):
+         return self.model.get_image_features(pixel_values, image_grid_thw)
+
+     @auto_docstring
+     @can_return_tuple
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: torch.Tensor | None = None,
+         position_ids: torch.LongTensor | None = None,
+         mm_token_type_ids: torch.IntTensor | None = None,
+         moe_mm_token_type_ids: torch.IntTensor | None = None,
+         past_key_values: Cache | None = None,
+         inputs_embeds: torch.FloatTensor | None = None,
+         labels: torch.LongTensor | None = None,
+         use_cache: bool | None = None,
+         output_router_logits: bool | None = None,
+         pixel_values: torch.Tensor | None = None,
+         pixel_values_videos: torch.FloatTensor | None = None,
+         image_grid_thw: torch.LongTensor | None = None,
+         video_grid_thw: torch.LongTensor | None = None,
+         rope_deltas: torch.LongTensor | None = None,
+         cache_position: torch.LongTensor | None = None,
+         logits_to_keep: int | torch.Tensor = 0,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> tuple | MoeCausalLMOutputWithPast:
+         r"""
+         mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
+         moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             The same as `mm_token_type_ids`, except that start/end image/video tokens are additionally counted as the respective vision tokens.
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for computing the language modeling loss. Indices should either be in `[0, ...,
+             config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+             (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+         image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+             The temporal, height and width dimensions of the feature grid of each image fed to the LLM.
+         video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+             The temporal, height and width dimensions of the feature grid of each video fed to the LLM.
+         rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+             The rope index difference between sequence length and multimodal rope.
+         """
+         output_router_logits = (
+             output_router_logits if output_router_logits is not None else self.config.text_config.output_router_logits
+         )
+
+         outputs = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             mm_token_type_ids=mm_token_type_ids,
+             moe_mm_token_type_ids=moe_mm_token_type_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_router_logits=output_router_logits,
+             return_dict=True,
+             pixel_values=pixel_values,
+             pixel_values_videos=pixel_values_videos,
+             image_grid_thw=image_grid_thw,
+             video_grid_thw=video_grid_thw,
+             rope_deltas=rope_deltas,
+             cache_position=cache_position,
+             **kwargs,
+         )
+
+         hidden_states = outputs.last_hidden_state
+         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
+
+         aux_loss = None
+         if output_router_logits:
+             aux_loss = load_balancing_loss_func(
+                 outputs.router_logits,
+                 self.num_experts,
+                 self.num_experts_per_tok,
+                 attention_mask,
+             )
+             if labels is not None:
+                 loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure it resides on the same device
+
+         return MoeCausalLMOutputWithPast(
+             loss=loss,
+             aux_loss=aux_loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+             router_logits=outputs.router_logits,
+         )
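When `labels` are provided and `output_router_logits=True`, the returned `loss` already includes the router term, i.e. the token cross-entropy plus `router_aux_loss_coef * aux_loss`. A hedged usage sketch: the checkpoint id is a placeholder (not taken from this diff), and a text-only input is used for brevity; image/video inputs would additionally go through the processor as `pixel_values` / `pixel_values_videos`:

    import torch
    from transformers import AutoProcessor

    model_id = "baidu/ERNIE-4.5-VL-..."  # placeholder checkpoint id
    processor = AutoProcessor.from_pretrained(model_id)
    model = Ernie4_5_VL_MoeForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16)

    enc = processor(text=["A short training example."], return_tensors="pt")
    out = model(**enc, labels=enc["input_ids"], output_router_logits=True)
    print(out.loss, out.aux_loss)  # out.loss already contains router_aux_loss_coef * out.aux_loss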
+
+     def prepare_inputs_for_generation(
+         self,
+         input_ids,
+         inputs_embeds=None,
+         attention_mask=None,
+         cache_position=None,
+         past_key_values=None,
+         image_grid_thw=None,
+         video_grid_thw=None,
+         use_cache=True,
+         is_first_iteration=False,
+         # Intentionally ignore position ids to force custom cache logic
+         position_ids=None,
+         **kwargs,
+     ):
+         model_inputs = super().prepare_inputs_for_generation(
+             input_ids,
+             inputs_embeds=inputs_embeds,
+             attention_mask=attention_mask,
+             cache_position=cache_position,
+             past_key_values=past_key_values,
+             image_grid_thw=image_grid_thw,
+             video_grid_thw=video_grid_thw,
+             use_cache=use_cache,
+             is_first_iteration=is_first_iteration,
+             **kwargs,
+         )
+
+         # Using our own caching with rope delta
+         model_inputs["position_ids"] = self.model.get_position_ids(
+             input_ids=model_inputs.get("input_ids"),
+             attention_mask=model_inputs.get("attention_mask"),
+             past_key_values=model_inputs.get("past_key_values"),
+             inputs_embeds=model_inputs.get("inputs_embeds"),
+             image_grid_thw=model_inputs.get("image_grid_thw"),
+             video_grid_thw=model_inputs.get("video_grid_thw"),
+             cache_position=model_inputs.get("cache_position"),
+             mm_token_type_ids=model_inputs.get("mm_token_type_ids"),
+         )
+
+         if not is_first_iteration and use_cache:
+             model_inputs["pixel_values"] = None
+             model_inputs["pixel_values_videos"] = None
+             model_inputs["mm_token_type_ids"] = None
+             model_inputs["moe_mm_token_type_ids"] = None
+
+         return model_inputs
+
+     def _get_image_nums_and_video_nums(
+         self,
+         input_ids: torch.LongTensor | None,
+         inputs_embeds: torch.Tensor | None = None,
+     ) -> tuple[torch.Tensor, torch.Tensor]:
+         """
+         Get the number of images and videos for each sample, used to split the packed visual tensors per sample.
+         These values are recomputed here rather than passed through the processor, to avoid unpredictable impacts
+         from interface modifications.
+
+         Args:
+             input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+                 Indices of input sequence tokens in the vocabulary.
+
+         Returns:
+             image_nums (`torch.LongTensor` of shape `(batch_size,)`): number of standalone images per sample
+             video_nums (`torch.LongTensor` of shape `(batch_size,)`): number of videos per sample
+         """
+
+         if inputs_embeds is not None:
+             is_image = (
+                 inputs_embeds
+                 == self.get_input_embeddings()(
+                     torch.tensor(self.config.image_start_token_id, dtype=torch.long, device=inputs_embeds.device)
+                 )
+             )[..., 0]
+             is_video_start = (
+                 inputs_embeds
+                 == self.get_input_embeddings()(
+                     torch.tensor(self.config.video_start_token_id, dtype=torch.long, device=inputs_embeds.device)
+                 )
+             )[..., 0]
+             is_video_end = (
+                 inputs_embeds
+                 == self.get_input_embeddings()(
+                     torch.tensor(self.config.video_end_token_id, dtype=torch.long, device=inputs_embeds.device)
+                 )
+             )[..., 0]
+         else:
+             is_image = input_ids == self.config.image_start_token_id
+             is_video_start = input_ids == self.config.video_start_token_id
+             is_video_end = input_ids == self.config.video_end_token_id
+
+         # Cumulative sum to track whether we're inside a video span
+         # We'll assume well-formed video tags (i.e. matching starts and ends)
+         video_level = torch.cumsum(is_video_start.int() - is_video_end.int(), dim=1)
+         inside_video = video_level > 0  # shape (batch_size, seq_length)
+
+         # Mask out image tokens that are inside video spans
+         standalone_images = is_image & (~inside_video)
+
+         # Count per batch
+         image_counts = standalone_images.sum(dim=1)
+         video_counts = is_video_start.sum(dim=1)
+
+         return image_counts, video_counts
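The counting above hinges on a running cumulative sum of video start/end markers: positions whose running level is positive sit inside a video, so image-start tokens that fall inside a video span are not counted as standalone images. A toy illustration of that bookkeeping with made-up token ids:

    import torch

    IMG_START, VID_START, VID_END, TXT = 7, 8, 9, 0
    ids = torch.tensor([[TXT, IMG_START, TXT, VID_START, IMG_START, IMG_START, VID_END, TXT]])

    is_image = ids == IMG_START
    is_video_start = ids == VID_START
    is_video_end = ids == VID_END

    inside_video = torch.cumsum(is_video_start.int() - is_video_end.int(), dim=1) > 0
    standalone_images = is_image & ~inside_video

    print(standalone_images.sum(dim=1))  # tensor([1]) -> only the leading image counts
    print(is_video_start.sum(dim=1))     # tensor([1]) -> one video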
+
+     def _expand_inputs_for_generation(
+         self,
+         expand_size: int = 1,
+         is_encoder_decoder: bool = False,
+         input_ids: torch.LongTensor | None = None,
+         **model_kwargs,
+     ) -> tuple[torch.LongTensor, dict[str, Any]]:
+         # Overwritten -- Support for expanding tensors without a batch size dimension
+         # e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_ts
+         # pixel_values.shape[0] is sum(seqlen_images for samples)
+         # image_grid_thw.shape[0] is sum(num_images for samples)
+
+         if expand_size == 1:
+             return input_ids, model_kwargs
+
+         visual_keys = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw", "second_per_grid_ts"]
+
+         def _expand_dict_for_generation_visual(dict_to_expand):
+             image_grid_thw = model_kwargs.get("image_grid_thw", None)
+             video_grid_thw = model_kwargs.get("video_grid_thw", None)
+             image_nums, video_nums = self._get_image_nums_and_video_nums(
+                 input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None)
+             )
+
+             def _repeat_interleave_samples(x, lengths, repeat_times):
+                 samples = torch.split(x, lengths)
+                 repeat_args = [repeat_times] + [1] * (x.dim() - 1)
+                 result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0)
+                 return result
+
+             for key in dict_to_expand:
+                 if key == "pixel_values":
+                     # split images into samples
+                     samples = torch.split(image_grid_thw, list(image_nums))
+                     # compute the sequence length of images for each sample
+                     lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
+                     dict_to_expand[key] = _repeat_interleave_samples(
+                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
+                     )
+                 elif key == "image_grid_thw":
+                     # get the number of images for each sample
+                     lengths = list(image_nums)
+                     dict_to_expand[key] = _repeat_interleave_samples(
+                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
+                     )
+                 elif key == "pixel_values_videos":
+                     samples = torch.split(video_grid_thw, list(video_nums))
+                     lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
+                     dict_to_expand[key] = _repeat_interleave_samples(
+                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
+                     )
+                 elif key == "video_grid_thw":
+                     lengths = list(video_nums)
+                     dict_to_expand[key] = _repeat_interleave_samples(
+                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
+                     )
+                 elif key == "second_per_grid_ts":
+                     dict_to_expand[key] = _repeat_interleave_samples(
+                         dict_to_expand[key], lengths=list(video_nums), repeat_times=expand_size
+                     )
+             return dict_to_expand
+
+         def _expand_dict_for_generation(dict_to_expand):
+             for key in dict_to_expand:
+                 if (
+                     key != "cache_position"
+                     and dict_to_expand[key] is not None
+                     and isinstance(dict_to_expand[key], torch.Tensor)
+                     and key not in visual_keys
+                 ):
+                     dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
+             return dict_to_expand
+
+         model_kwargs = _expand_dict_for_generation_visual(model_kwargs)
+
+         if input_ids is not None:
+             input_ids = input_ids.repeat_interleave(expand_size, dim=0)
+
+         model_kwargs = _expand_dict_for_generation(model_kwargs)
+
+         if is_encoder_decoder:
+             if model_kwargs.get("encoder_outputs") is None:
+                 raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
+             model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
+
+         return input_ids, model_kwargs
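Visual tensors such as `pixel_values` and `image_grid_thw` are packed along their first dimension (all images of all samples concatenated), so expanding the batch for beam search or sampling must repeat whole per-sample chunks rather than interleaving individual rows. A toy contrast between the two behaviours, with `lengths` playing the role of the per-sample chunk sizes:

    import torch

    x = torch.tensor([[0], [1], [2]])    # rows 0-1 belong to sample A, row 2 to sample B
    lengths, expand_size = [2, 1], 2

    # per-sample repeat, as done by `_repeat_interleave_samples`
    chunks = torch.split(x, lengths)
    per_sample = torch.cat([c.repeat(expand_size, 1) for c in chunks], dim=0)
    print(per_sample.flatten().tolist())  # [0, 1, 0, 1, 2, 2]

    # a naive row-wise repeat_interleave would break the per-sample grouping
    print(x.repeat_interleave(expand_size, dim=0).flatten().tolist())  # [0, 0, 1, 1, 2, 2]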
+
+
+ __all__ = [
+     "Ernie4_5_VL_MoePreTrainedModel",
+     "Ernie4_5_VL_MoeForConditionalGeneration",
+     "Ernie4_5_VL_MoeModel",
+     "Ernie4_5_VL_MoeTextModel",
+     "Ernie4_5_VL_MoeVisionTransformerPretrainedModel",
+     "Ernie4_5_VL_MoeVariableResolutionResamplerModel",
+ ]