transformers 5.0.0__py3-none-any.whl → 5.0.0rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1606)
  1. transformers/__init__.py +36 -55
  2. transformers/activations.py +1 -1
  3. transformers/audio_utils.py +33 -32
  4. transformers/cache_utils.py +139 -32
  5. transformers/cli/chat.py +3 -3
  6. transformers/cli/serve.py +19 -49
  7. transformers/cli/transformers.py +1 -2
  8. transformers/configuration_utils.py +155 -129
  9. transformers/conversion_mapping.py +22 -158
  10. transformers/convert_slow_tokenizer.py +17 -227
  11. transformers/core_model_loading.py +185 -528
  12. transformers/data/data_collator.py +4 -12
  13. transformers/data/processors/glue.py +1 -0
  14. transformers/data/processors/utils.py +1 -0
  15. transformers/data/processors/xnli.py +1 -0
  16. transformers/dependency_versions_check.py +1 -0
  17. transformers/dependency_versions_table.py +7 -5
  18. transformers/distributed/configuration_utils.py +2 -1
  19. transformers/dynamic_module_utils.py +25 -24
  20. transformers/feature_extraction_sequence_utils.py +23 -19
  21. transformers/feature_extraction_utils.py +33 -64
  22. transformers/file_utils.py +1 -0
  23. transformers/generation/__init__.py +1 -11
  24. transformers/generation/candidate_generator.py +33 -80
  25. transformers/generation/configuration_utils.py +133 -189
  26. transformers/generation/continuous_batching/__init__.py +1 -4
  27. transformers/generation/continuous_batching/cache.py +25 -83
  28. transformers/generation/continuous_batching/cache_manager.py +45 -155
  29. transformers/generation/continuous_batching/continuous_api.py +147 -270
  30. transformers/generation/continuous_batching/requests.py +3 -51
  31. transformers/generation/continuous_batching/scheduler.py +105 -160
  32. transformers/generation/logits_process.py +128 -0
  33. transformers/generation/stopping_criteria.py +1 -1
  34. transformers/generation/streamers.py +1 -0
  35. transformers/generation/utils.py +123 -122
  36. transformers/generation/watermarking.py +6 -8
  37. transformers/hf_argparser.py +13 -9
  38. transformers/hyperparameter_search.py +2 -1
  39. transformers/image_processing_base.py +23 -12
  40. transformers/image_processing_utils.py +15 -11
  41. transformers/image_processing_utils_fast.py +75 -85
  42. transformers/image_transforms.py +42 -73
  43. transformers/image_utils.py +32 -30
  44. transformers/initialization.py +0 -37
  45. transformers/integrations/__init__.py +2 -16
  46. transformers/integrations/accelerate.py +113 -58
  47. transformers/integrations/aqlm.py +66 -36
  48. transformers/integrations/awq.py +516 -45
  49. transformers/integrations/bitnet.py +105 -47
  50. transformers/integrations/bitsandbytes.py +202 -91
  51. transformers/integrations/deepspeed.py +4 -161
  52. transformers/integrations/eetq.py +82 -84
  53. transformers/integrations/executorch.py +1 -1
  54. transformers/integrations/fbgemm_fp8.py +145 -190
  55. transformers/integrations/finegrained_fp8.py +215 -249
  56. transformers/integrations/flash_attention.py +3 -3
  57. transformers/integrations/flex_attention.py +1 -1
  58. transformers/integrations/fp_quant.py +0 -90
  59. transformers/integrations/ggml.py +2 -11
  60. transformers/integrations/higgs.py +62 -37
  61. transformers/integrations/hub_kernels.py +8 -65
  62. transformers/integrations/integration_utils.py +3 -47
  63. transformers/integrations/mistral.py +0 -12
  64. transformers/integrations/mxfp4.py +80 -33
  65. transformers/integrations/peft.py +191 -483
  66. transformers/integrations/quanto.py +56 -77
  67. transformers/integrations/spqr.py +90 -42
  68. transformers/integrations/tensor_parallel.py +221 -167
  69. transformers/integrations/torchao.py +43 -35
  70. transformers/integrations/vptq.py +59 -40
  71. transformers/kernels/__init__.py +0 -0
  72. transformers/{models/pe_audio_video/processing_pe_audio_video.py → kernels/falcon_mamba/__init__.py} +3 -12
  73. transformers/kernels/falcon_mamba/selective_scan_with_ln_interface.py +529 -0
  74. transformers/loss/loss_utils.py +0 -2
  75. transformers/masking_utils.py +55 -51
  76. transformers/model_debugging_utils.py +5 -4
  77. transformers/modelcard.py +194 -15
  78. transformers/modeling_attn_mask_utils.py +19 -19
  79. transformers/modeling_flash_attention_utils.py +27 -27
  80. transformers/modeling_gguf_pytorch_utils.py +24 -79
  81. transformers/modeling_layers.py +22 -21
  82. transformers/modeling_outputs.py +253 -242
  83. transformers/modeling_rope_utils.py +117 -138
  84. transformers/modeling_utils.py +739 -850
  85. transformers/models/__init__.py +0 -27
  86. transformers/models/afmoe/configuration_afmoe.py +33 -40
  87. transformers/models/afmoe/modeling_afmoe.py +54 -42
  88. transformers/models/afmoe/modular_afmoe.py +33 -23
  89. transformers/models/aimv2/configuration_aimv2.py +10 -2
  90. transformers/models/aimv2/modeling_aimv2.py +42 -47
  91. transformers/models/aimv2/modular_aimv2.py +19 -17
  92. transformers/models/albert/configuration_albert.py +2 -8
  93. transformers/models/albert/modeling_albert.py +69 -70
  94. transformers/models/albert/tokenization_albert.py +14 -5
  95. transformers/models/align/configuration_align.py +6 -8
  96. transformers/models/align/modeling_align.py +89 -94
  97. transformers/models/align/processing_align.py +30 -2
  98. transformers/models/altclip/configuration_altclip.py +7 -4
  99. transformers/models/altclip/modeling_altclip.py +103 -114
  100. transformers/models/altclip/processing_altclip.py +15 -2
  101. transformers/models/apertus/__init__.py +1 -0
  102. transformers/models/apertus/configuration_apertus.py +28 -23
  103. transformers/models/apertus/modeling_apertus.py +40 -39
  104. transformers/models/apertus/modular_apertus.py +38 -37
  105. transformers/models/arcee/configuration_arcee.py +30 -25
  106. transformers/models/arcee/modeling_arcee.py +39 -36
  107. transformers/models/arcee/modular_arcee.py +23 -20
  108. transformers/models/aria/configuration_aria.py +44 -31
  109. transformers/models/aria/image_processing_aria.py +27 -25
  110. transformers/models/aria/modeling_aria.py +106 -110
  111. transformers/models/aria/modular_aria.py +127 -118
  112. transformers/models/aria/processing_aria.py +35 -28
  113. transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +1 -0
  114. transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py +6 -3
  115. transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +8 -6
  116. transformers/models/audioflamingo3/__init__.py +1 -0
  117. transformers/models/audioflamingo3/configuration_audioflamingo3.py +1 -0
  118. transformers/models/audioflamingo3/modeling_audioflamingo3.py +49 -58
  119. transformers/models/audioflamingo3/modular_audioflamingo3.py +43 -53
  120. transformers/models/audioflamingo3/processing_audioflamingo3.py +30 -33
  121. transformers/models/auto/auto_factory.py +7 -6
  122. transformers/models/auto/configuration_auto.py +5 -66
  123. transformers/models/auto/feature_extraction_auto.py +10 -14
  124. transformers/models/auto/image_processing_auto.py +41 -32
  125. transformers/models/auto/modeling_auto.py +188 -46
  126. transformers/models/auto/processing_auto.py +11 -24
  127. transformers/models/auto/tokenization_auto.py +588 -171
  128. transformers/models/auto/video_processing_auto.py +10 -12
  129. transformers/models/autoformer/configuration_autoformer.py +7 -4
  130. transformers/models/autoformer/modeling_autoformer.py +101 -104
  131. transformers/models/aya_vision/configuration_aya_vision.py +1 -4
  132. transformers/models/aya_vision/modeling_aya_vision.py +102 -71
  133. transformers/models/aya_vision/modular_aya_vision.py +74 -46
  134. transformers/models/aya_vision/processing_aya_vision.py +53 -25
  135. transformers/models/bamba/configuration_bamba.py +39 -34
  136. transformers/models/bamba/modeling_bamba.py +86 -82
  137. transformers/models/bamba/modular_bamba.py +72 -70
  138. transformers/models/bark/configuration_bark.py +8 -6
  139. transformers/models/bark/generation_configuration_bark.py +5 -3
  140. transformers/models/bark/modeling_bark.py +57 -54
  141. transformers/models/bark/processing_bark.py +41 -19
  142. transformers/models/bart/configuration_bart.py +6 -9
  143. transformers/models/bart/modeling_bart.py +126 -135
  144. transformers/models/barthez/tokenization_barthez.py +11 -3
  145. transformers/models/bartpho/tokenization_bartpho.py +7 -6
  146. transformers/models/beit/configuration_beit.py +11 -0
  147. transformers/models/beit/image_processing_beit.py +56 -53
  148. transformers/models/beit/image_processing_beit_fast.py +12 -10
  149. transformers/models/beit/modeling_beit.py +60 -69
  150. transformers/models/bert/configuration_bert.py +2 -12
  151. transformers/models/bert/modeling_bert.py +122 -114
  152. transformers/models/bert/tokenization_bert.py +23 -8
  153. transformers/models/bert/tokenization_bert_legacy.py +5 -3
  154. transformers/models/bert_generation/configuration_bert_generation.py +2 -17
  155. transformers/models/bert_generation/modeling_bert_generation.py +49 -49
  156. transformers/models/bert_generation/tokenization_bert_generation.py +3 -2
  157. transformers/models/bert_japanese/tokenization_bert_japanese.py +6 -5
  158. transformers/models/bertweet/tokenization_bertweet.py +3 -1
  159. transformers/models/big_bird/configuration_big_bird.py +9 -12
  160. transformers/models/big_bird/modeling_big_bird.py +109 -116
  161. transformers/models/big_bird/tokenization_big_bird.py +43 -16
  162. transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +9 -9
  163. transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +117 -130
  164. transformers/models/biogpt/configuration_biogpt.py +2 -8
  165. transformers/models/biogpt/modeling_biogpt.py +76 -72
  166. transformers/models/biogpt/modular_biogpt.py +66 -62
  167. transformers/models/biogpt/tokenization_biogpt.py +5 -3
  168. transformers/models/bit/configuration_bit.py +1 -0
  169. transformers/models/bit/image_processing_bit.py +24 -21
  170. transformers/models/bit/image_processing_bit_fast.py +1 -0
  171. transformers/models/bit/modeling_bit.py +12 -25
  172. transformers/models/bitnet/configuration_bitnet.py +28 -23
  173. transformers/models/bitnet/modeling_bitnet.py +39 -36
  174. transformers/models/bitnet/modular_bitnet.py +6 -4
  175. transformers/models/blenderbot/configuration_blenderbot.py +5 -8
  176. transformers/models/blenderbot/modeling_blenderbot.py +96 -77
  177. transformers/models/blenderbot/tokenization_blenderbot.py +24 -18
  178. transformers/models/blenderbot_small/configuration_blenderbot_small.py +5 -8
  179. transformers/models/blenderbot_small/modeling_blenderbot_small.py +69 -79
  180. transformers/models/blenderbot_small/tokenization_blenderbot_small.py +3 -1
  181. transformers/models/blip/configuration_blip.py +10 -9
  182. transformers/models/blip/image_processing_blip.py +20 -17
  183. transformers/models/blip/image_processing_blip_fast.py +1 -0
  184. transformers/models/blip/modeling_blip.py +108 -117
  185. transformers/models/blip/modeling_blip_text.py +65 -73
  186. transformers/models/blip/processing_blip.py +36 -5
  187. transformers/models/blip_2/configuration_blip_2.py +2 -2
  188. transformers/models/blip_2/modeling_blip_2.py +118 -146
  189. transformers/models/blip_2/processing_blip_2.py +38 -8
  190. transformers/models/bloom/configuration_bloom.py +2 -5
  191. transformers/models/bloom/modeling_bloom.py +104 -77
  192. transformers/models/blt/configuration_blt.py +86 -94
  193. transformers/models/blt/modeling_blt.py +81 -238
  194. transformers/models/blt/modular_blt.py +65 -228
  195. transformers/models/bridgetower/configuration_bridgetower.py +2 -7
  196. transformers/models/bridgetower/image_processing_bridgetower.py +35 -34
  197. transformers/models/bridgetower/image_processing_bridgetower_fast.py +16 -13
  198. transformers/models/bridgetower/modeling_bridgetower.py +119 -141
  199. transformers/models/bridgetower/processing_bridgetower.py +16 -2
  200. transformers/models/bros/configuration_bros.py +18 -24
  201. transformers/models/bros/modeling_bros.py +80 -90
  202. transformers/models/bros/processing_bros.py +12 -2
  203. transformers/models/byt5/tokenization_byt5.py +6 -4
  204. transformers/models/camembert/configuration_camembert.py +2 -8
  205. transformers/models/camembert/modeling_camembert.py +195 -196
  206. transformers/models/camembert/modular_camembert.py +54 -51
  207. transformers/models/camembert/tokenization_camembert.py +13 -6
  208. transformers/models/canine/configuration_canine.py +2 -4
  209. transformers/models/canine/modeling_canine.py +75 -84
  210. transformers/models/canine/tokenization_canine.py +1 -2
  211. transformers/models/chameleon/configuration_chameleon.py +34 -29
  212. transformers/models/chameleon/image_processing_chameleon.py +24 -21
  213. transformers/models/chameleon/image_processing_chameleon_fast.py +6 -5
  214. transformers/models/chameleon/modeling_chameleon.py +93 -142
  215. transformers/models/chameleon/processing_chameleon.py +41 -16
  216. transformers/models/chinese_clip/configuration_chinese_clip.py +8 -10
  217. transformers/models/chinese_clip/image_processing_chinese_clip.py +24 -21
  218. transformers/models/chinese_clip/image_processing_chinese_clip_fast.py +1 -0
  219. transformers/models/chinese_clip/modeling_chinese_clip.py +92 -96
  220. transformers/models/chinese_clip/processing_chinese_clip.py +15 -2
  221. transformers/models/clap/configuration_clap.py +9 -4
  222. transformers/models/clap/feature_extraction_clap.py +12 -11
  223. transformers/models/clap/modeling_clap.py +123 -136
  224. transformers/models/clap/processing_clap.py +15 -2
  225. transformers/models/clip/configuration_clip.py +2 -4
  226. transformers/models/clip/image_processing_clip.py +24 -21
  227. transformers/models/clip/image_processing_clip_fast.py +1 -9
  228. transformers/models/clip/modeling_clip.py +65 -65
  229. transformers/models/clip/processing_clip.py +14 -2
  230. transformers/models/clip/tokenization_clip.py +46 -21
  231. transformers/models/clipseg/configuration_clipseg.py +2 -4
  232. transformers/models/clipseg/modeling_clipseg.py +109 -119
  233. transformers/models/clipseg/processing_clipseg.py +42 -19
  234. transformers/models/clvp/configuration_clvp.py +5 -15
  235. transformers/models/clvp/feature_extraction_clvp.py +10 -7
  236. transformers/models/clvp/modeling_clvp.py +146 -155
  237. transformers/models/clvp/number_normalizer.py +2 -1
  238. transformers/models/clvp/processing_clvp.py +20 -3
  239. transformers/models/clvp/tokenization_clvp.py +64 -1
  240. transformers/models/code_llama/tokenization_code_llama.py +44 -18
  241. transformers/models/codegen/configuration_codegen.py +4 -4
  242. transformers/models/codegen/modeling_codegen.py +53 -63
  243. transformers/models/codegen/tokenization_codegen.py +47 -17
  244. transformers/models/cohere/configuration_cohere.py +30 -25
  245. transformers/models/cohere/modeling_cohere.py +42 -40
  246. transformers/models/cohere/modular_cohere.py +29 -26
  247. transformers/models/cohere/tokenization_cohere.py +46 -15
  248. transformers/models/cohere2/configuration_cohere2.py +32 -31
  249. transformers/models/cohere2/modeling_cohere2.py +44 -42
  250. transformers/models/cohere2/modular_cohere2.py +54 -54
  251. transformers/models/cohere2_vision/image_processing_cohere2_vision_fast.py +14 -13
  252. transformers/models/cohere2_vision/modeling_cohere2_vision.py +58 -59
  253. transformers/models/cohere2_vision/modular_cohere2_vision.py +46 -45
  254. transformers/models/cohere2_vision/processing_cohere2_vision.py +36 -6
  255. transformers/models/colpali/configuration_colpali.py +1 -0
  256. transformers/models/colpali/modeling_colpali.py +16 -14
  257. transformers/models/colpali/modular_colpali.py +51 -11
  258. transformers/models/colpali/processing_colpali.py +52 -14
  259. transformers/models/colqwen2/modeling_colqwen2.py +28 -28
  260. transformers/models/colqwen2/modular_colqwen2.py +74 -37
  261. transformers/models/colqwen2/processing_colqwen2.py +52 -16
  262. transformers/models/conditional_detr/configuration_conditional_detr.py +2 -1
  263. transformers/models/conditional_detr/image_processing_conditional_detr.py +70 -67
  264. transformers/models/conditional_detr/image_processing_conditional_detr_fast.py +36 -36
  265. transformers/models/conditional_detr/modeling_conditional_detr.py +87 -99
  266. transformers/models/conditional_detr/modular_conditional_detr.py +3 -49
  267. transformers/models/convbert/configuration_convbert.py +8 -11
  268. transformers/models/convbert/modeling_convbert.py +87 -94
  269. transformers/models/convbert/tokenization_convbert.py +1 -0
  270. transformers/models/convnext/configuration_convnext.py +1 -0
  271. transformers/models/convnext/image_processing_convnext.py +23 -20
  272. transformers/models/convnext/image_processing_convnext_fast.py +21 -16
  273. transformers/models/convnext/modeling_convnext.py +12 -9
  274. transformers/models/convnextv2/configuration_convnextv2.py +1 -0
  275. transformers/models/convnextv2/modeling_convnextv2.py +12 -9
  276. transformers/models/cpm/tokenization_cpm.py +7 -6
  277. transformers/models/cpm/tokenization_cpm_fast.py +5 -3
  278. transformers/models/cpmant/configuration_cpmant.py +1 -4
  279. transformers/models/cpmant/modeling_cpmant.py +40 -38
  280. transformers/models/cpmant/tokenization_cpmant.py +3 -1
  281. transformers/models/csm/configuration_csm.py +66 -58
  282. transformers/models/csm/generation_csm.py +35 -31
  283. transformers/models/csm/modeling_csm.py +85 -85
  284. transformers/models/csm/modular_csm.py +58 -58
  285. transformers/models/csm/processing_csm.py +68 -25
  286. transformers/models/ctrl/configuration_ctrl.py +1 -16
  287. transformers/models/ctrl/modeling_ctrl.py +44 -54
  288. transformers/models/ctrl/tokenization_ctrl.py +1 -0
  289. transformers/models/cvt/configuration_cvt.py +1 -0
  290. transformers/models/cvt/modeling_cvt.py +16 -20
  291. transformers/models/cwm/__init__.py +1 -0
  292. transformers/models/cwm/configuration_cwm.py +12 -8
  293. transformers/models/cwm/modeling_cwm.py +39 -37
  294. transformers/models/cwm/modular_cwm.py +12 -10
  295. transformers/models/d_fine/configuration_d_fine.py +5 -7
  296. transformers/models/d_fine/modeling_d_fine.py +128 -138
  297. transformers/models/d_fine/modular_d_fine.py +18 -33
  298. transformers/models/dab_detr/configuration_dab_detr.py +3 -6
  299. transformers/models/dab_detr/modeling_dab_detr.py +75 -81
  300. transformers/models/dac/configuration_dac.py +1 -0
  301. transformers/models/dac/feature_extraction_dac.py +9 -6
  302. transformers/models/dac/modeling_dac.py +26 -24
  303. transformers/models/data2vec/configuration_data2vec_audio.py +2 -4
  304. transformers/models/data2vec/configuration_data2vec_text.py +3 -11
  305. transformers/models/data2vec/configuration_data2vec_vision.py +1 -0
  306. transformers/models/data2vec/modeling_data2vec_audio.py +56 -57
  307. transformers/models/data2vec/modeling_data2vec_text.py +93 -98
  308. transformers/models/data2vec/modeling_data2vec_vision.py +45 -49
  309. transformers/models/data2vec/modular_data2vec_audio.py +1 -6
  310. transformers/models/data2vec/modular_data2vec_text.py +54 -58
  311. transformers/models/dbrx/configuration_dbrx.py +22 -36
  312. transformers/models/dbrx/modeling_dbrx.py +45 -42
  313. transformers/models/dbrx/modular_dbrx.py +33 -31
  314. transformers/models/deberta/configuration_deberta.py +1 -6
  315. transformers/models/deberta/modeling_deberta.py +60 -64
  316. transformers/models/deberta/tokenization_deberta.py +21 -9
  317. transformers/models/deberta_v2/configuration_deberta_v2.py +1 -6
  318. transformers/models/deberta_v2/modeling_deberta_v2.py +65 -71
  319. transformers/models/deberta_v2/tokenization_deberta_v2.py +29 -11
  320. transformers/models/decision_transformer/configuration_decision_transformer.py +2 -3
  321. transformers/models/decision_transformer/modeling_decision_transformer.py +56 -60
  322. transformers/models/deepseek_v2/configuration_deepseek_v2.py +44 -39
  323. transformers/models/deepseek_v2/modeling_deepseek_v2.py +43 -43
  324. transformers/models/deepseek_v2/modular_deepseek_v2.py +49 -48
  325. transformers/models/deepseek_v3/configuration_deepseek_v3.py +45 -40
  326. transformers/models/deepseek_v3/modeling_deepseek_v3.py +42 -45
  327. transformers/models/deepseek_v3/modular_deepseek_v3.py +9 -14
  328. transformers/models/deepseek_vl/configuration_deepseek_vl.py +3 -2
  329. transformers/models/deepseek_vl/image_processing_deepseek_vl.py +26 -25
  330. transformers/models/deepseek_vl/image_processing_deepseek_vl_fast.py +10 -10
  331. transformers/models/deepseek_vl/modeling_deepseek_vl.py +48 -57
  332. transformers/models/deepseek_vl/modular_deepseek_vl.py +43 -14
  333. transformers/models/deepseek_vl/processing_deepseek_vl.py +41 -10
  334. transformers/models/deepseek_vl_hybrid/configuration_deepseek_vl_hybrid.py +5 -3
  335. transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py +35 -35
  336. transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid_fast.py +24 -20
  337. transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py +61 -109
  338. transformers/models/deepseek_vl_hybrid/modular_deepseek_vl_hybrid.py +118 -146
  339. transformers/models/deepseek_vl_hybrid/processing_deepseek_vl_hybrid.py +44 -12
  340. transformers/models/deformable_detr/configuration_deformable_detr.py +3 -2
  341. transformers/models/deformable_detr/image_processing_deformable_detr.py +61 -59
  342. transformers/models/deformable_detr/image_processing_deformable_detr_fast.py +28 -28
  343. transformers/models/deformable_detr/modeling_deformable_detr.py +82 -88
  344. transformers/models/deformable_detr/modular_deformable_detr.py +3 -1
  345. transformers/models/deit/configuration_deit.py +1 -0
  346. transformers/models/deit/image_processing_deit.py +21 -18
  347. transformers/models/deit/image_processing_deit_fast.py +1 -0
  348. transformers/models/deit/modeling_deit.py +22 -24
  349. transformers/models/depth_anything/configuration_depth_anything.py +4 -2
  350. transformers/models/depth_anything/modeling_depth_anything.py +10 -10
  351. transformers/models/depth_pro/configuration_depth_pro.py +1 -0
  352. transformers/models/depth_pro/image_processing_depth_pro.py +23 -22
  353. transformers/models/depth_pro/image_processing_depth_pro_fast.py +10 -8
  354. transformers/models/depth_pro/modeling_depth_pro.py +27 -31
  355. transformers/models/detr/configuration_detr.py +2 -1
  356. transformers/models/detr/image_processing_detr.py +66 -64
  357. transformers/models/detr/image_processing_detr_fast.py +34 -33
  358. transformers/models/detr/modeling_detr.py +79 -95
  359. transformers/models/dia/configuration_dia.py +15 -9
  360. transformers/models/dia/feature_extraction_dia.py +9 -6
  361. transformers/models/dia/generation_dia.py +50 -48
  362. transformers/models/dia/modeling_dia.py +69 -78
  363. transformers/models/dia/modular_dia.py +56 -64
  364. transformers/models/dia/processing_dia.py +29 -39
  365. transformers/models/dia/tokenization_dia.py +6 -3
  366. transformers/models/diffllama/configuration_diffllama.py +30 -25
  367. transformers/models/diffllama/modeling_diffllama.py +49 -46
  368. transformers/models/diffllama/modular_diffllama.py +19 -17
  369. transformers/models/dinat/configuration_dinat.py +1 -0
  370. transformers/models/dinat/modeling_dinat.py +44 -47
  371. transformers/models/dinov2/configuration_dinov2.py +1 -0
  372. transformers/models/dinov2/modeling_dinov2.py +15 -15
  373. transformers/models/dinov2_with_registers/configuration_dinov2_with_registers.py +1 -1
  374. transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py +15 -16
  375. transformers/models/dinov2_with_registers/modular_dinov2_with_registers.py +9 -9
  376. transformers/models/dinov3_convnext/configuration_dinov3_convnext.py +7 -4
  377. transformers/models/dinov3_convnext/modeling_dinov3_convnext.py +6 -3
  378. transformers/models/dinov3_vit/configuration_dinov3_vit.py +8 -5
  379. transformers/models/dinov3_vit/image_processing_dinov3_vit_fast.py +9 -7
  380. transformers/models/dinov3_vit/modeling_dinov3_vit.py +18 -19
  381. transformers/models/dinov3_vit/modular_dinov3_vit.py +15 -16
  382. transformers/models/distilbert/configuration_distilbert.py +2 -8
  383. transformers/models/distilbert/modeling_distilbert.py +55 -55
  384. transformers/models/distilbert/tokenization_distilbert.py +1 -13
  385. transformers/models/doge/__init__.py +1 -0
  386. transformers/models/doge/configuration_doge.py +32 -39
  387. transformers/models/doge/modeling_doge.py +49 -45
  388. transformers/models/doge/modular_doge.py +63 -71
  389. transformers/models/donut/configuration_donut_swin.py +1 -0
  390. transformers/models/donut/image_processing_donut.py +29 -26
  391. transformers/models/donut/image_processing_donut_fast.py +15 -9
  392. transformers/models/donut/modeling_donut_swin.py +58 -62
  393. transformers/models/donut/processing_donut.py +26 -5
  394. transformers/models/dots1/configuration_dots1.py +33 -41
  395. transformers/models/dots1/modeling_dots1.py +45 -54
  396. transformers/models/dots1/modular_dots1.py +4 -5
  397. transformers/models/dpr/configuration_dpr.py +2 -19
  398. transformers/models/dpr/modeling_dpr.py +39 -42
  399. transformers/models/dpr/tokenization_dpr.py +9 -19
  400. transformers/models/dpr/tokenization_dpr_fast.py +9 -7
  401. transformers/models/dpt/configuration_dpt.py +2 -1
  402. transformers/models/dpt/image_processing_dpt.py +66 -65
  403. transformers/models/dpt/image_processing_dpt_fast.py +20 -18
  404. transformers/models/dpt/modeling_dpt.py +30 -32
  405. transformers/models/dpt/modular_dpt.py +17 -15
  406. transformers/models/edgetam/configuration_edgetam.py +3 -2
  407. transformers/models/edgetam/modeling_edgetam.py +86 -86
  408. transformers/models/edgetam/modular_edgetam.py +26 -21
  409. transformers/models/edgetam_video/__init__.py +1 -0
  410. transformers/models/edgetam_video/configuration_edgetam_video.py +1 -0
  411. transformers/models/edgetam_video/modeling_edgetam_video.py +158 -169
  412. transformers/models/edgetam_video/modular_edgetam_video.py +37 -30
  413. transformers/models/efficientloftr/configuration_efficientloftr.py +5 -4
  414. transformers/models/efficientloftr/image_processing_efficientloftr.py +16 -14
  415. transformers/models/efficientloftr/image_processing_efficientloftr_fast.py +9 -9
  416. transformers/models/efficientloftr/modeling_efficientloftr.py +38 -59
  417. transformers/models/efficientloftr/modular_efficientloftr.py +3 -1
  418. transformers/models/efficientnet/configuration_efficientnet.py +1 -0
  419. transformers/models/efficientnet/image_processing_efficientnet.py +32 -28
  420. transformers/models/efficientnet/image_processing_efficientnet_fast.py +19 -17
  421. transformers/models/efficientnet/modeling_efficientnet.py +15 -19
  422. transformers/models/electra/configuration_electra.py +3 -13
  423. transformers/models/electra/modeling_electra.py +103 -108
  424. transformers/models/emu3/configuration_emu3.py +17 -13
  425. transformers/models/emu3/image_processing_emu3.py +39 -44
  426. transformers/models/emu3/modeling_emu3.py +108 -148
  427. transformers/models/emu3/modular_emu3.py +73 -115
  428. transformers/models/emu3/processing_emu3.py +43 -18
  429. transformers/models/encodec/configuration_encodec.py +4 -2
  430. transformers/models/encodec/feature_extraction_encodec.py +13 -10
  431. transformers/models/encodec/modeling_encodec.py +29 -39
  432. transformers/models/encoder_decoder/configuration_encoder_decoder.py +2 -12
  433. transformers/models/encoder_decoder/modeling_encoder_decoder.py +43 -37
  434. transformers/models/eomt/configuration_eomt.py +1 -0
  435. transformers/models/eomt/image_processing_eomt.py +56 -66
  436. transformers/models/eomt/image_processing_eomt_fast.py +33 -76
  437. transformers/models/eomt/modeling_eomt.py +18 -23
  438. transformers/models/eomt/modular_eomt.py +13 -18
  439. transformers/models/ernie/configuration_ernie.py +3 -24
  440. transformers/models/ernie/modeling_ernie.py +132 -127
  441. transformers/models/ernie/modular_ernie.py +103 -97
  442. transformers/models/ernie4_5/configuration_ernie4_5.py +27 -23
  443. transformers/models/ernie4_5/modeling_ernie4_5.py +38 -36
  444. transformers/models/ernie4_5/modular_ernie4_5.py +4 -3
  445. transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py +36 -32
  446. transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py +55 -56
  447. transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py +46 -18
  448. transformers/models/esm/configuration_esm.py +15 -11
  449. transformers/models/esm/modeling_esm.py +34 -38
  450. transformers/models/esm/modeling_esmfold.py +49 -53
  451. transformers/models/esm/openfold_utils/chunk_utils.py +6 -6
  452. transformers/models/esm/openfold_utils/loss.py +2 -1
  453. transformers/models/esm/openfold_utils/protein.py +16 -15
  454. transformers/models/esm/openfold_utils/tensor_utils.py +6 -6
  455. transformers/models/esm/tokenization_esm.py +4 -2
  456. transformers/models/evolla/configuration_evolla.py +40 -50
  457. transformers/models/evolla/modeling_evolla.py +66 -71
  458. transformers/models/evolla/modular_evolla.py +47 -53
  459. transformers/models/evolla/processing_evolla.py +35 -23
  460. transformers/models/exaone4/configuration_exaone4.py +25 -23
  461. transformers/models/exaone4/modeling_exaone4.py +38 -35
  462. transformers/models/exaone4/modular_exaone4.py +46 -44
  463. transformers/models/falcon/configuration_falcon.py +26 -31
  464. transformers/models/falcon/modeling_falcon.py +80 -82
  465. transformers/models/falcon_h1/configuration_falcon_h1.py +51 -45
  466. transformers/models/falcon_h1/modeling_falcon_h1.py +82 -85
  467. transformers/models/falcon_h1/modular_falcon_h1.py +51 -56
  468. transformers/models/falcon_mamba/configuration_falcon_mamba.py +2 -1
  469. transformers/models/falcon_mamba/modeling_falcon_mamba.py +82 -75
  470. transformers/models/falcon_mamba/modular_falcon_mamba.py +45 -28
  471. transformers/models/fastspeech2_conformer/configuration_fastspeech2_conformer.py +6 -2
  472. transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py +60 -76
  473. transformers/models/fastspeech2_conformer/tokenization_fastspeech2_conformer.py +3 -2
  474. transformers/models/flaubert/configuration_flaubert.py +5 -10
  475. transformers/models/flaubert/modeling_flaubert.py +143 -145
  476. transformers/models/flaubert/tokenization_flaubert.py +5 -3
  477. transformers/models/flava/configuration_flava.py +6 -5
  478. transformers/models/flava/image_processing_flava.py +67 -66
  479. transformers/models/flava/image_processing_flava_fast.py +49 -46
  480. transformers/models/flava/modeling_flava.py +136 -153
  481. transformers/models/flava/processing_flava.py +12 -2
  482. transformers/models/flex_olmo/__init__.py +1 -0
  483. transformers/models/flex_olmo/configuration_flex_olmo.py +32 -28
  484. transformers/models/flex_olmo/modeling_flex_olmo.py +47 -47
  485. transformers/models/flex_olmo/modular_flex_olmo.py +44 -40
  486. transformers/models/florence2/configuration_florence2.py +1 -0
  487. transformers/models/florence2/modeling_florence2.py +69 -111
  488. transformers/models/florence2/modular_florence2.py +101 -104
  489. transformers/models/florence2/processing_florence2.py +47 -18
  490. transformers/models/fnet/configuration_fnet.py +2 -6
  491. transformers/models/fnet/modeling_fnet.py +80 -83
  492. transformers/models/fnet/tokenization_fnet.py +1 -0
  493. transformers/models/focalnet/configuration_focalnet.py +1 -0
  494. transformers/models/focalnet/modeling_focalnet.py +45 -51
  495. transformers/models/fsmt/configuration_fsmt.py +17 -12
  496. transformers/models/fsmt/modeling_fsmt.py +48 -49
  497. transformers/models/fsmt/tokenization_fsmt.py +5 -3
  498. transformers/models/funnel/configuration_funnel.py +1 -8
  499. transformers/models/funnel/modeling_funnel.py +93 -99
  500. transformers/models/funnel/tokenization_funnel.py +27 -17
  501. transformers/models/fuyu/configuration_fuyu.py +34 -28
  502. transformers/models/fuyu/image_processing_fuyu.py +31 -29
  503. transformers/models/fuyu/image_processing_fuyu_fast.py +17 -17
  504. transformers/models/fuyu/modeling_fuyu.py +53 -53
  505. transformers/models/fuyu/processing_fuyu.py +34 -23
  506. transformers/models/gemma/configuration_gemma.py +30 -25
  507. transformers/models/gemma/modeling_gemma.py +50 -46
  508. transformers/models/gemma/modular_gemma.py +47 -42
  509. transformers/models/gemma/tokenization_gemma.py +30 -10
  510. transformers/models/gemma2/configuration_gemma2.py +35 -30
  511. transformers/models/gemma2/modeling_gemma2.py +42 -39
  512. transformers/models/gemma2/modular_gemma2.py +66 -63
  513. transformers/models/gemma3/configuration_gemma3.py +44 -44
  514. transformers/models/gemma3/image_processing_gemma3.py +31 -29
  515. transformers/models/gemma3/image_processing_gemma3_fast.py +13 -11
  516. transformers/models/gemma3/modeling_gemma3.py +207 -159
  517. transformers/models/gemma3/modular_gemma3.py +204 -153
  518. transformers/models/gemma3/processing_gemma3.py +5 -5
  519. transformers/models/gemma3n/configuration_gemma3n.py +26 -36
  520. transformers/models/gemma3n/feature_extraction_gemma3n.py +11 -9
  521. transformers/models/gemma3n/modeling_gemma3n.py +356 -222
  522. transformers/models/gemma3n/modular_gemma3n.py +207 -230
  523. transformers/models/gemma3n/processing_gemma3n.py +26 -12
  524. transformers/models/git/configuration_git.py +8 -5
  525. transformers/models/git/modeling_git.py +204 -266
  526. transformers/models/git/processing_git.py +14 -2
  527. transformers/models/glm/configuration_glm.py +28 -24
  528. transformers/models/glm/modeling_glm.py +40 -37
  529. transformers/models/glm/modular_glm.py +7 -4
  530. transformers/models/glm4/configuration_glm4.py +28 -24
  531. transformers/models/glm4/modeling_glm4.py +42 -40
  532. transformers/models/glm4/modular_glm4.py +10 -8
  533. transformers/models/glm46v/configuration_glm46v.py +1 -0
  534. transformers/models/glm46v/image_processing_glm46v.py +40 -35
  535. transformers/models/glm46v/image_processing_glm46v_fast.py +9 -9
  536. transformers/models/glm46v/modeling_glm46v.py +90 -137
  537. transformers/models/glm46v/modular_glm46v.py +3 -4
  538. transformers/models/glm46v/processing_glm46v.py +41 -7
  539. transformers/models/glm46v/video_processing_glm46v.py +11 -9
  540. transformers/models/glm4_moe/configuration_glm4_moe.py +32 -40
  541. transformers/models/glm4_moe/modeling_glm4_moe.py +42 -45
  542. transformers/models/glm4_moe/modular_glm4_moe.py +34 -42
  543. transformers/models/glm4v/configuration_glm4v.py +20 -18
  544. transformers/models/glm4v/image_processing_glm4v.py +40 -34
  545. transformers/models/glm4v/image_processing_glm4v_fast.py +9 -8
  546. transformers/models/glm4v/modeling_glm4v.py +205 -254
  547. transformers/models/glm4v/modular_glm4v.py +224 -210
  548. transformers/models/glm4v/processing_glm4v.py +41 -7
  549. transformers/models/glm4v/video_processing_glm4v.py +11 -9
  550. transformers/models/glm4v_moe/configuration_glm4v_moe.py +125 -136
  551. transformers/models/glm4v_moe/modeling_glm4v_moe.py +368 -377
  552. transformers/models/glm4v_moe/modular_glm4v_moe.py +169 -83
  553. transformers/models/glpn/configuration_glpn.py +1 -0
  554. transformers/models/glpn/image_processing_glpn.py +12 -11
  555. transformers/models/glpn/image_processing_glpn_fast.py +13 -11
  556. transformers/models/glpn/modeling_glpn.py +14 -16
  557. transformers/models/got_ocr2/configuration_got_ocr2.py +12 -4
  558. transformers/models/got_ocr2/image_processing_got_ocr2.py +24 -22
  559. transformers/models/got_ocr2/image_processing_got_ocr2_fast.py +11 -9
  560. transformers/models/got_ocr2/modeling_got_ocr2.py +80 -77
  561. transformers/models/got_ocr2/modular_got_ocr2.py +51 -54
  562. transformers/models/got_ocr2/processing_got_ocr2.py +63 -42
  563. transformers/models/gpt2/configuration_gpt2.py +2 -13
  564. transformers/models/gpt2/modeling_gpt2.py +115 -120
  565. transformers/models/gpt2/tokenization_gpt2.py +46 -15
  566. transformers/models/gpt_bigcode/configuration_gpt_bigcode.py +2 -5
  567. transformers/models/gpt_bigcode/modeling_gpt_bigcode.py +89 -79
  568. transformers/models/gpt_neo/configuration_gpt_neo.py +2 -9
  569. transformers/models/gpt_neo/modeling_gpt_neo.py +67 -83
  570. transformers/models/gpt_neox/configuration_gpt_neox.py +25 -25
  571. transformers/models/gpt_neox/modeling_gpt_neox.py +75 -76
  572. transformers/models/gpt_neox/modular_gpt_neox.py +66 -67
  573. transformers/models/gpt_neox/tokenization_gpt_neox.py +51 -9
  574. transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py +19 -24
  575. transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py +47 -46
  576. transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py +3 -1
  577. transformers/models/gpt_oss/configuration_gpt_oss.py +28 -46
  578. transformers/models/gpt_oss/modeling_gpt_oss.py +121 -83
  579. transformers/models/gpt_oss/modular_gpt_oss.py +103 -64
  580. transformers/models/gpt_sw3/tokenization_gpt_sw3.py +4 -4
  581. transformers/models/gptj/configuration_gptj.py +4 -4
  582. transformers/models/gptj/modeling_gptj.py +87 -101
  583. transformers/models/granite/configuration_granite.py +33 -28
  584. transformers/models/granite/modeling_granite.py +46 -44
  585. transformers/models/granite/modular_granite.py +31 -29
  586. transformers/models/granite_speech/configuration_granite_speech.py +1 -0
  587. transformers/models/granite_speech/feature_extraction_granite_speech.py +3 -1
  588. transformers/models/granite_speech/modeling_granite_speech.py +52 -82
  589. transformers/models/granite_speech/processing_granite_speech.py +4 -11
  590. transformers/models/granitemoe/configuration_granitemoe.py +36 -31
  591. transformers/models/granitemoe/modeling_granitemoe.py +46 -41
  592. transformers/models/granitemoe/modular_granitemoe.py +27 -22
  593. transformers/models/granitemoehybrid/__init__.py +1 -0
  594. transformers/models/granitemoehybrid/configuration_granitemoehybrid.py +47 -46
  595. transformers/models/granitemoehybrid/modeling_granitemoehybrid.py +93 -97
  596. transformers/models/granitemoehybrid/modular_granitemoehybrid.py +21 -54
  597. transformers/models/granitemoeshared/configuration_granitemoeshared.py +37 -33
  598. transformers/models/granitemoeshared/modeling_granitemoeshared.py +61 -54
  599. transformers/models/granitemoeshared/modular_granitemoeshared.py +21 -19
  600. transformers/models/grounding_dino/configuration_grounding_dino.py +4 -6
  601. transformers/models/grounding_dino/image_processing_grounding_dino.py +62 -60
  602. transformers/models/grounding_dino/image_processing_grounding_dino_fast.py +29 -28
  603. transformers/models/grounding_dino/modeling_grounding_dino.py +140 -155
  604. transformers/models/grounding_dino/modular_grounding_dino.py +3 -2
  605. transformers/models/grounding_dino/processing_grounding_dino.py +38 -10
  606. transformers/models/groupvit/configuration_groupvit.py +2 -4
  607. transformers/models/groupvit/modeling_groupvit.py +93 -107
  608. transformers/models/helium/configuration_helium.py +29 -25
  609. transformers/models/helium/modeling_helium.py +40 -38
  610. transformers/models/helium/modular_helium.py +7 -3
  611. transformers/models/herbert/tokenization_herbert.py +28 -10
  612. transformers/models/hgnet_v2/configuration_hgnet_v2.py +1 -0
  613. transformers/models/hgnet_v2/modeling_hgnet_v2.py +10 -24
  614. transformers/models/hgnet_v2/modular_hgnet_v2.py +10 -24
  615. transformers/models/hiera/configuration_hiera.py +1 -0
  616. transformers/models/hiera/modeling_hiera.py +66 -72
  617. transformers/models/hubert/configuration_hubert.py +2 -4
  618. transformers/models/hubert/modeling_hubert.py +37 -42
  619. transformers/models/hubert/modular_hubert.py +11 -13
  620. transformers/models/hunyuan_v1_dense/configuration_hunyuan_v1_dense.py +31 -26
  621. transformers/models/hunyuan_v1_dense/modeling_hunyuan_v1_dense.py +38 -35
  622. transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py +6 -4
  623. transformers/models/hunyuan_v1_moe/__init__.py +1 -1
  624. transformers/models/hunyuan_v1_moe/configuration_hunyuan_v1_moe.py +36 -31
  625. transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py +42 -47
  626. transformers/models/hunyuan_v1_moe/modular_hunyuan_v1_moe.py +9 -9
  627. transformers/models/ibert/configuration_ibert.py +2 -4
  628. transformers/models/ibert/modeling_ibert.py +62 -82
  629. transformers/models/ibert/quant_modules.py +1 -0
  630. transformers/models/idefics/configuration_idefics.py +8 -5
  631. transformers/models/idefics/image_processing_idefics.py +15 -13
  632. transformers/models/idefics/modeling_idefics.py +82 -75
  633. transformers/models/idefics/perceiver.py +3 -1
  634. transformers/models/idefics/processing_idefics.py +48 -32
  635. transformers/models/idefics/vision.py +25 -24
  636. transformers/models/idefics2/configuration_idefics2.py +3 -1
  637. transformers/models/idefics2/image_processing_idefics2.py +32 -31
  638. transformers/models/idefics2/image_processing_idefics2_fast.py +8 -8
  639. transformers/models/idefics2/modeling_idefics2.py +101 -127
  640. transformers/models/idefics2/processing_idefics2.py +68 -10
  641. transformers/models/idefics3/configuration_idefics3.py +4 -1
  642. transformers/models/idefics3/image_processing_idefics3.py +43 -42
  643. transformers/models/idefics3/image_processing_idefics3_fast.py +15 -40
  644. transformers/models/idefics3/modeling_idefics3.py +90 -115
  645. transformers/models/idefics3/processing_idefics3.py +69 -15
  646. transformers/models/ijepa/configuration_ijepa.py +1 -0
  647. transformers/models/ijepa/modeling_ijepa.py +11 -10
  648. transformers/models/ijepa/modular_ijepa.py +7 -5
  649. transformers/models/imagegpt/configuration_imagegpt.py +2 -9
  650. transformers/models/imagegpt/image_processing_imagegpt.py +18 -17
  651. transformers/models/imagegpt/image_processing_imagegpt_fast.py +16 -11
  652. transformers/models/imagegpt/modeling_imagegpt.py +65 -76
  653. transformers/models/informer/configuration_informer.py +9 -6
  654. transformers/models/informer/modeling_informer.py +86 -88
  655. transformers/models/informer/modular_informer.py +16 -14
  656. transformers/models/instructblip/configuration_instructblip.py +2 -2
  657. transformers/models/instructblip/modeling_instructblip.py +63 -103
  658. transformers/models/instructblip/processing_instructblip.py +36 -10
  659. transformers/models/instructblipvideo/configuration_instructblipvideo.py +2 -2
  660. transformers/models/instructblipvideo/modeling_instructblipvideo.py +139 -157
  661. transformers/models/instructblipvideo/modular_instructblipvideo.py +64 -73
  662. transformers/models/instructblipvideo/processing_instructblipvideo.py +33 -14
  663. transformers/models/instructblipvideo/video_processing_instructblipvideo.py +8 -6
  664. transformers/models/internvl/configuration_internvl.py +1 -0
  665. transformers/models/internvl/modeling_internvl.py +106 -85
  666. transformers/models/internvl/modular_internvl.py +67 -47
  667. transformers/models/internvl/processing_internvl.py +45 -12
  668. transformers/models/internvl/video_processing_internvl.py +12 -10
  669. transformers/models/jamba/configuration_jamba.py +8 -5
  670. transformers/models/jamba/modeling_jamba.py +66 -68
  671. transformers/models/jamba/modular_jamba.py +55 -54
  672. transformers/models/janus/configuration_janus.py +1 -0
  673. transformers/models/janus/image_processing_janus.py +37 -35
  674. transformers/models/janus/image_processing_janus_fast.py +20 -18
  675. transformers/models/janus/modeling_janus.py +191 -115
  676. transformers/models/janus/modular_janus.py +84 -133
  677. transformers/models/janus/processing_janus.py +43 -17
  678. transformers/models/jetmoe/configuration_jetmoe.py +26 -24
  679. transformers/models/jetmoe/modeling_jetmoe.py +46 -43
  680. transformers/models/jetmoe/modular_jetmoe.py +33 -31
  681. transformers/models/kosmos2/configuration_kosmos2.py +9 -10
  682. transformers/models/kosmos2/modeling_kosmos2.py +173 -208
  683. transformers/models/kosmos2/processing_kosmos2.py +55 -40
  684. transformers/models/kosmos2_5/__init__.py +1 -0
  685. transformers/models/kosmos2_5/configuration_kosmos2_5.py +9 -8
  686. transformers/models/kosmos2_5/image_processing_kosmos2_5.py +12 -10
  687. transformers/models/kosmos2_5/image_processing_kosmos2_5_fast.py +13 -4
  688. transformers/models/kosmos2_5/modeling_kosmos2_5.py +118 -132
  689. transformers/models/kosmos2_5/processing_kosmos2_5.py +29 -8
  690. transformers/models/kyutai_speech_to_text/configuration_kyutai_speech_to_text.py +28 -31
  691. transformers/models/kyutai_speech_to_text/feature_extraction_kyutai_speech_to_text.py +14 -12
  692. transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py +100 -110
  693. transformers/models/kyutai_speech_to_text/modular_kyutai_speech_to_text.py +22 -28
  694. transformers/models/kyutai_speech_to_text/processing_kyutai_speech_to_text.py +8 -2
  695. transformers/models/layoutlm/configuration_layoutlm.py +2 -14
  696. transformers/models/layoutlm/modeling_layoutlm.py +72 -77
  697. transformers/models/layoutlmv2/configuration_layoutlmv2.py +17 -14
  698. transformers/models/layoutlmv2/image_processing_layoutlmv2.py +21 -18
  699. transformers/models/layoutlmv2/image_processing_layoutlmv2_fast.py +9 -7
  700. transformers/models/layoutlmv2/modeling_layoutlmv2.py +50 -64
  701. transformers/models/layoutlmv2/processing_layoutlmv2.py +44 -14
  702. transformers/models/layoutlmv2/tokenization_layoutlmv2.py +126 -73
  703. transformers/models/layoutlmv3/configuration_layoutlmv3.py +19 -16
  704. transformers/models/layoutlmv3/image_processing_layoutlmv3.py +26 -24
  705. transformers/models/layoutlmv3/image_processing_layoutlmv3_fast.py +11 -9
  706. transformers/models/layoutlmv3/modeling_layoutlmv3.py +56 -82
  707. transformers/models/layoutlmv3/processing_layoutlmv3.py +46 -14
  708. transformers/models/layoutlmv3/tokenization_layoutlmv3.py +134 -74
  709. transformers/models/layoutxlm/configuration_layoutxlm.py +17 -14
  710. transformers/models/layoutxlm/modular_layoutxlm.py +1 -0
  711. transformers/models/layoutxlm/processing_layoutxlm.py +44 -14
  712. transformers/models/layoutxlm/tokenization_layoutxlm.py +113 -77
  713. transformers/models/led/configuration_led.py +12 -8
  714. transformers/models/led/modeling_led.py +266 -124
  715. transformers/models/levit/configuration_levit.py +1 -0
  716. transformers/models/levit/image_processing_levit.py +21 -19
  717. transformers/models/levit/image_processing_levit_fast.py +5 -4
  718. transformers/models/levit/modeling_levit.py +19 -38
  719. transformers/models/lfm2/configuration_lfm2.py +30 -27
  720. transformers/models/lfm2/modeling_lfm2.py +50 -47
  721. transformers/models/lfm2/modular_lfm2.py +30 -29
  722. transformers/models/lfm2_moe/__init__.py +1 -0
  723. transformers/models/lfm2_moe/configuration_lfm2_moe.py +9 -6
  724. transformers/models/lfm2_moe/modeling_lfm2_moe.py +53 -61
  725. transformers/models/lfm2_moe/modular_lfm2_moe.py +37 -13
  726. transformers/models/lfm2_vl/configuration_lfm2_vl.py +1 -4
  727. transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py +12 -41
  728. transformers/models/lfm2_vl/modeling_lfm2_vl.py +66 -84
  729. transformers/models/lfm2_vl/modular_lfm2_vl.py +56 -70
  730. transformers/models/lfm2_vl/processing_lfm2_vl.py +76 -96
  731. transformers/models/lightglue/image_processing_lightglue.py +15 -16
  732. transformers/models/lightglue/image_processing_lightglue_fast.py +9 -9
  733. transformers/models/lightglue/modeling_lightglue.py +31 -31
  734. transformers/models/lightglue/modular_lightglue.py +28 -29
  735. transformers/models/lilt/configuration_lilt.py +2 -6
  736. transformers/models/lilt/modeling_lilt.py +70 -76
  737. transformers/models/llama/configuration_llama.py +31 -26
  738. transformers/models/llama/modeling_llama.py +39 -36
  739. transformers/models/llama/tokenization_llama.py +44 -14
  740. transformers/models/llama4/configuration_llama4.py +30 -27
  741. transformers/models/llama4/image_processing_llama4_fast.py +14 -12
  742. transformers/models/llama4/modeling_llama4.py +113 -120
  743. transformers/models/llama4/processing_llama4.py +57 -33
  744. transformers/models/llava/configuration_llava.py +1 -10
  745. transformers/models/llava/image_processing_llava.py +28 -25
  746. transformers/models/llava/image_processing_llava_fast.py +11 -9
  747. transformers/models/llava/modeling_llava.py +109 -85
  748. transformers/models/llava/processing_llava.py +51 -18
  749. transformers/models/llava_next/configuration_llava_next.py +2 -2
  750. transformers/models/llava_next/image_processing_llava_next.py +45 -43
  751. transformers/models/llava_next/image_processing_llava_next_fast.py +13 -11
  752. transformers/models/llava_next/modeling_llava_next.py +107 -110
  753. transformers/models/llava_next/processing_llava_next.py +47 -18
  754. transformers/models/llava_next_video/configuration_llava_next_video.py +7 -4
  755. transformers/models/llava_next_video/modeling_llava_next_video.py +158 -175
  756. transformers/models/llava_next_video/modular_llava_next_video.py +150 -155
  757. transformers/models/llava_next_video/processing_llava_next_video.py +63 -21
  758. transformers/models/llava_next_video/video_processing_llava_next_video.py +1 -0
  759. transformers/models/llava_onevision/configuration_llava_onevision.py +7 -4
  760. transformers/models/llava_onevision/image_processing_llava_onevision.py +42 -40
  761. transformers/models/llava_onevision/image_processing_llava_onevision_fast.py +15 -14
  762. transformers/models/llava_onevision/modeling_llava_onevision.py +169 -177
  763. transformers/models/llava_onevision/modular_llava_onevision.py +156 -163
  764. transformers/models/llava_onevision/processing_llava_onevision.py +53 -21
  765. transformers/models/llava_onevision/video_processing_llava_onevision.py +1 -0
  766. transformers/models/longcat_flash/__init__.py +1 -0
  767. transformers/models/longcat_flash/configuration_longcat_flash.py +42 -37
  768. transformers/models/longcat_flash/modeling_longcat_flash.py +36 -36
  769. transformers/models/longcat_flash/modular_longcat_flash.py +21 -21
  770. transformers/models/longformer/configuration_longformer.py +5 -5
  771. transformers/models/longformer/modeling_longformer.py +101 -105
  772. transformers/models/longt5/configuration_longt5.py +7 -9
  773. transformers/models/longt5/modeling_longt5.py +49 -49
  774. transformers/models/luke/configuration_luke.py +2 -8
  775. transformers/models/luke/modeling_luke.py +181 -188
  776. transformers/models/luke/tokenization_luke.py +140 -107
  777. transformers/models/lxmert/configuration_lxmert.py +1 -16
  778. transformers/models/lxmert/modeling_lxmert.py +74 -65
  779. transformers/models/m2m_100/configuration_m2m_100.py +9 -7
  780. transformers/models/m2m_100/modeling_m2m_100.py +71 -83
  781. transformers/models/m2m_100/tokenization_m2m_100.py +8 -8
  782. transformers/models/mamba/configuration_mamba.py +2 -1
  783. transformers/models/mamba/modeling_mamba.py +66 -58
  784. transformers/models/mamba2/configuration_mamba2.py +8 -5
  785. transformers/models/mamba2/modeling_mamba2.py +69 -68
  786. transformers/models/marian/configuration_marian.py +5 -10
  787. transformers/models/marian/modeling_marian.py +87 -93
  788. transformers/models/marian/tokenization_marian.py +6 -6
  789. transformers/models/markuplm/configuration_markuplm.py +7 -4
  790. transformers/models/markuplm/feature_extraction_markuplm.py +2 -1
  791. transformers/models/markuplm/modeling_markuplm.py +70 -69
  792. transformers/models/markuplm/processing_markuplm.py +38 -31
  793. transformers/models/markuplm/tokenization_markuplm.py +136 -93
  794. transformers/models/mask2former/configuration_mask2former.py +8 -5
  795. transformers/models/mask2former/image_processing_mask2former.py +85 -84
  796. transformers/models/mask2former/image_processing_mask2former_fast.py +40 -37
  797. transformers/models/mask2former/modeling_mask2former.py +103 -118
  798. transformers/models/mask2former/modular_mask2former.py +8 -6
  799. transformers/models/maskformer/configuration_maskformer.py +9 -6
  800. transformers/models/maskformer/configuration_maskformer_swin.py +1 -0
  801. transformers/models/maskformer/image_processing_maskformer.py +85 -84
  802. transformers/models/maskformer/image_processing_maskformer_fast.py +40 -36
  803. transformers/models/maskformer/modeling_maskformer.py +65 -79
  804. transformers/models/maskformer/modeling_maskformer_swin.py +32 -36
  805. transformers/models/mbart/configuration_mbart.py +4 -9
  806. transformers/models/mbart/modeling_mbart.py +116 -131
  807. transformers/models/mbart/tokenization_mbart.py +54 -11
  808. transformers/models/mbart50/tokenization_mbart50.py +13 -8
  809. transformers/models/megatron_bert/configuration_megatron_bert.py +3 -13
  810. transformers/models/megatron_bert/modeling_megatron_bert.py +150 -148
  811. transformers/models/metaclip_2/configuration_metaclip_2.py +1 -4
  812. transformers/models/metaclip_2/modeling_metaclip_2.py +84 -91
  813. transformers/models/metaclip_2/modular_metaclip_2.py +45 -61
  814. transformers/models/mgp_str/configuration_mgp_str.py +1 -0
  815. transformers/models/mgp_str/modeling_mgp_str.py +18 -20
  816. transformers/models/mgp_str/processing_mgp_str.py +20 -3
  817. transformers/models/mgp_str/tokenization_mgp_str.py +3 -1
  818. transformers/models/mimi/configuration_mimi.py +40 -42
  819. transformers/models/mimi/modeling_mimi.py +113 -142
  820. transformers/models/minimax/__init__.py +1 -0
  821. transformers/models/minimax/configuration_minimax.py +43 -37
  822. transformers/models/minimax/modeling_minimax.py +51 -61
  823. transformers/models/minimax/modular_minimax.py +62 -68
  824. transformers/models/ministral/configuration_ministral.py +29 -25
  825. transformers/models/ministral/modeling_ministral.py +38 -36
  826. transformers/models/ministral/modular_ministral.py +37 -32
  827. transformers/models/ministral3/configuration_ministral3.py +27 -24
  828. transformers/models/ministral3/modeling_ministral3.py +37 -36
  829. transformers/models/ministral3/modular_ministral3.py +5 -4
  830. transformers/models/mistral/configuration_mistral.py +29 -24
  831. transformers/models/mistral/modeling_mistral.py +37 -36
  832. transformers/models/mistral/modular_mistral.py +12 -11
  833. transformers/models/mistral3/configuration_mistral3.py +1 -4
  834. transformers/models/mistral3/modeling_mistral3.py +86 -89
  835. transformers/models/mistral3/modular_mistral3.py +68 -69
  836. transformers/models/mixtral/configuration_mixtral.py +34 -29
  837. transformers/models/mixtral/modeling_mixtral.py +45 -50
  838. transformers/models/mixtral/modular_mixtral.py +31 -32
  839. transformers/models/mlcd/configuration_mlcd.py +1 -0
  840. transformers/models/mlcd/modeling_mlcd.py +14 -20
  841. transformers/models/mlcd/modular_mlcd.py +13 -17
  842. transformers/models/mllama/configuration_mllama.py +15 -10
  843. transformers/models/mllama/image_processing_mllama.py +25 -23
  844. transformers/models/mllama/image_processing_mllama_fast.py +11 -11
  845. transformers/models/mllama/modeling_mllama.py +94 -105
  846. transformers/models/mllama/processing_mllama.py +55 -6
  847. transformers/models/mluke/tokenization_mluke.py +107 -101
  848. transformers/models/mm_grounding_dino/configuration_mm_grounding_dino.py +3 -5
  849. transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py +140 -155
  850. transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py +3 -5
  851. transformers/models/mobilebert/configuration_mobilebert.py +2 -4
  852. transformers/models/mobilebert/modeling_mobilebert.py +85 -77
  853. transformers/models/mobilebert/tokenization_mobilebert.py +1 -0
  854. transformers/models/mobilenet_v1/configuration_mobilenet_v1.py +1 -0
  855. transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py +23 -20
  856. transformers/models/mobilenet_v1/image_processing_mobilenet_v1_fast.py +1 -0
  857. transformers/models/mobilenet_v1/modeling_mobilenet_v1.py +16 -15
  858. transformers/models/mobilenet_v2/configuration_mobilenet_v2.py +1 -0
  859. transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py +51 -48
  860. transformers/models/mobilenet_v2/image_processing_mobilenet_v2_fast.py +15 -13
  861. transformers/models/mobilenet_v2/modeling_mobilenet_v2.py +22 -24
  862. transformers/models/mobilevit/configuration_mobilevit.py +1 -0
  863. transformers/models/mobilevit/image_processing_mobilevit.py +49 -46
  864. transformers/models/mobilevit/image_processing_mobilevit_fast.py +14 -12
  865. transformers/models/mobilevit/modeling_mobilevit.py +21 -28
  866. transformers/models/mobilevitv2/configuration_mobilevitv2.py +1 -0
  867. transformers/models/mobilevitv2/modeling_mobilevitv2.py +22 -28
  868. transformers/models/modernbert/configuration_modernbert.py +42 -44
  869. transformers/models/modernbert/modeling_modernbert.py +133 -145
  870. transformers/models/modernbert/modular_modernbert.py +170 -186
  871. transformers/models/modernbert_decoder/configuration_modernbert_decoder.py +40 -40
  872. transformers/models/modernbert_decoder/modeling_modernbert_decoder.py +57 -62
  873. transformers/models/modernbert_decoder/modular_modernbert_decoder.py +86 -94
  874. transformers/models/moonshine/configuration_moonshine.py +31 -34
  875. transformers/models/moonshine/modeling_moonshine.py +71 -71
  876. transformers/models/moonshine/modular_moonshine.py +83 -88
  877. transformers/models/moshi/configuration_moshi.py +23 -46
  878. transformers/models/moshi/modeling_moshi.py +187 -157
  879. transformers/models/mpnet/configuration_mpnet.py +2 -6
  880. transformers/models/mpnet/modeling_mpnet.py +57 -62
  881. transformers/models/mpnet/tokenization_mpnet.py +15 -4
  882. transformers/models/mpt/configuration_mpt.py +9 -5
  883. transformers/models/mpt/modeling_mpt.py +60 -60
  884. transformers/models/mra/configuration_mra.py +2 -8
  885. transformers/models/mra/modeling_mra.py +57 -64
  886. transformers/models/mt5/configuration_mt5.py +8 -10
  887. transformers/models/mt5/modeling_mt5.py +95 -87
  888. transformers/models/musicgen/configuration_musicgen.py +8 -12
  889. transformers/models/musicgen/modeling_musicgen.py +122 -118
  890. transformers/models/musicgen/processing_musicgen.py +21 -3
  891. transformers/models/musicgen_melody/configuration_musicgen_melody.py +8 -15
  892. transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py +9 -8
  893. transformers/models/musicgen_melody/modeling_musicgen_melody.py +123 -117
  894. transformers/models/musicgen_melody/processing_musicgen_melody.py +22 -3
  895. transformers/models/mvp/configuration_mvp.py +5 -8
  896. transformers/models/mvp/modeling_mvp.py +123 -135
  897. transformers/models/myt5/tokenization_myt5.py +10 -8
  898. transformers/models/nanochat/configuration_nanochat.py +8 -5
  899. transformers/models/nanochat/modeling_nanochat.py +40 -37
  900. transformers/models/nanochat/modular_nanochat.py +14 -12
  901. transformers/models/nemotron/configuration_nemotron.py +30 -25
  902. transformers/models/nemotron/modeling_nemotron.py +57 -56
  903. transformers/models/nllb/tokenization_nllb.py +28 -12
  904. transformers/models/nllb_moe/configuration_nllb_moe.py +9 -7
  905. transformers/models/nllb_moe/modeling_nllb_moe.py +69 -77
  906. transformers/models/nougat/image_processing_nougat.py +32 -29
  907. transformers/models/nougat/image_processing_nougat_fast.py +14 -12
  908. transformers/models/nougat/processing_nougat.py +39 -37
  909. transformers/models/nougat/tokenization_nougat.py +73 -18
  910. transformers/models/nystromformer/configuration_nystromformer.py +2 -8
  911. transformers/models/nystromformer/modeling_nystromformer.py +63 -74
  912. transformers/models/olmo/configuration_olmo.py +28 -23
  913. transformers/models/olmo/modeling_olmo.py +39 -36
  914. transformers/models/olmo/modular_olmo.py +11 -7
  915. transformers/models/olmo2/configuration_olmo2.py +28 -23
  916. transformers/models/olmo2/modeling_olmo2.py +41 -37
  917. transformers/models/olmo2/modular_olmo2.py +32 -29
  918. transformers/models/olmo3/__init__.py +1 -0
  919. transformers/models/olmo3/configuration_olmo3.py +30 -26
  920. transformers/models/olmo3/modeling_olmo3.py +39 -36
  921. transformers/models/olmo3/modular_olmo3.py +40 -37
  922. transformers/models/olmoe/configuration_olmoe.py +33 -29
  923. transformers/models/olmoe/modeling_olmoe.py +46 -52
  924. transformers/models/olmoe/modular_olmoe.py +15 -16
  925. transformers/models/omdet_turbo/configuration_omdet_turbo.py +4 -2
  926. transformers/models/omdet_turbo/modeling_omdet_turbo.py +47 -53
  927. transformers/models/omdet_turbo/processing_omdet_turbo.py +67 -19
  928. transformers/models/oneformer/configuration_oneformer.py +8 -5
  929. transformers/models/oneformer/image_processing_oneformer.py +84 -83
  930. transformers/models/oneformer/image_processing_oneformer_fast.py +42 -41
  931. transformers/models/oneformer/modeling_oneformer.py +171 -147
  932. transformers/models/oneformer/processing_oneformer.py +43 -28
  933. transformers/models/openai/configuration_openai.py +1 -16
  934. transformers/models/openai/modeling_openai.py +51 -65
  935. transformers/models/openai/tokenization_openai.py +47 -8
  936. transformers/models/opt/configuration_opt.py +7 -6
  937. transformers/models/opt/modeling_opt.py +76 -78
  938. transformers/models/ovis2/__init__.py +1 -0
  939. transformers/models/ovis2/configuration_ovis2.py +1 -0
  940. transformers/models/ovis2/image_processing_ovis2.py +24 -22
  941. transformers/models/ovis2/image_processing_ovis2_fast.py +11 -9
  942. transformers/models/ovis2/modeling_ovis2.py +142 -111
  943. transformers/models/ovis2/modular_ovis2.py +45 -90
  944. transformers/models/ovis2/processing_ovis2.py +40 -12
  945. transformers/models/owlv2/configuration_owlv2.py +2 -4
  946. transformers/models/owlv2/image_processing_owlv2.py +21 -20
  947. transformers/models/owlv2/image_processing_owlv2_fast.py +15 -12
  948. transformers/models/owlv2/modeling_owlv2.py +117 -133
  949. transformers/models/owlv2/modular_owlv2.py +14 -11
  950. transformers/models/owlv2/processing_owlv2.py +49 -20
  951. transformers/models/owlvit/configuration_owlvit.py +2 -4
  952. transformers/models/owlvit/image_processing_owlvit.py +22 -21
  953. transformers/models/owlvit/image_processing_owlvit_fast.py +3 -2
  954. transformers/models/owlvit/modeling_owlvit.py +116 -132
  955. transformers/models/owlvit/processing_owlvit.py +48 -20
  956. transformers/models/paligemma/configuration_paligemma.py +1 -4
  957. transformers/models/paligemma/modeling_paligemma.py +93 -103
  958. transformers/models/paligemma/processing_paligemma.py +66 -13
  959. transformers/models/parakeet/configuration_parakeet.py +14 -7
  960. transformers/models/parakeet/feature_extraction_parakeet.py +12 -10
  961. transformers/models/parakeet/modeling_parakeet.py +28 -32
  962. transformers/models/parakeet/modular_parakeet.py +20 -23
  963. transformers/models/parakeet/processing_parakeet.py +5 -13
  964. transformers/models/parakeet/{tokenization_parakeet.py → tokenization_parakeet_fast.py} +7 -5
  965. transformers/models/patchtsmixer/configuration_patchtsmixer.py +8 -5
  966. transformers/models/patchtsmixer/modeling_patchtsmixer.py +62 -70
  967. transformers/models/patchtst/configuration_patchtst.py +9 -6
  968. transformers/models/patchtst/modeling_patchtst.py +80 -97
  969. transformers/models/pegasus/configuration_pegasus.py +5 -8
  970. transformers/models/pegasus/modeling_pegasus.py +66 -72
  971. transformers/models/pegasus/tokenization_pegasus.py +45 -15
  972. transformers/models/pegasus_x/configuration_pegasus_x.py +4 -5
  973. transformers/models/pegasus_x/modeling_pegasus_x.py +52 -55
  974. transformers/models/perceiver/configuration_perceiver.py +1 -0
  975. transformers/models/perceiver/image_processing_perceiver.py +25 -22
  976. transformers/models/perceiver/image_processing_perceiver_fast.py +9 -7
  977. transformers/models/perceiver/modeling_perceiver.py +146 -165
  978. transformers/models/perceiver/tokenization_perceiver.py +6 -3
  979. transformers/models/perception_lm/configuration_perception_lm.py +1 -0
  980. transformers/models/perception_lm/image_processing_perception_lm_fast.py +10 -8
  981. transformers/models/perception_lm/modeling_perception_lm.py +70 -71
  982. transformers/models/perception_lm/modular_perception_lm.py +61 -65
  983. transformers/models/perception_lm/processing_perception_lm.py +47 -13
  984. transformers/models/perception_lm/video_processing_perception_lm.py +1 -0
  985. transformers/models/persimmon/configuration_persimmon.py +28 -23
  986. transformers/models/persimmon/modeling_persimmon.py +45 -43
  987. transformers/models/phi/configuration_phi.py +28 -23
  988. transformers/models/phi/modeling_phi.py +43 -40
  989. transformers/models/phi/modular_phi.py +24 -23
  990. transformers/models/phi3/configuration_phi3.py +33 -28
  991. transformers/models/phi3/modeling_phi3.py +38 -36
  992. transformers/models/phi3/modular_phi3.py +17 -13
  993. transformers/models/phi4_multimodal/configuration_phi4_multimodal.py +33 -30
  994. transformers/models/phi4_multimodal/feature_extraction_phi4_multimodal.py +9 -7
  995. transformers/models/phi4_multimodal/image_processing_phi4_multimodal_fast.py +11 -11
  996. transformers/models/phi4_multimodal/modeling_phi4_multimodal.py +78 -95
  997. transformers/models/phi4_multimodal/modular_phi4_multimodal.py +80 -98
  998. transformers/models/phi4_multimodal/processing_phi4_multimodal.py +44 -7
  999. transformers/models/phimoe/configuration_phimoe.py +36 -31
  1000. transformers/models/phimoe/modeling_phimoe.py +45 -50
  1001. transformers/models/phimoe/modular_phimoe.py +4 -3
  1002. transformers/models/phobert/tokenization_phobert.py +6 -4
  1003. transformers/models/pix2struct/configuration_pix2struct.py +10 -12
  1004. transformers/models/pix2struct/image_processing_pix2struct.py +19 -15
  1005. transformers/models/pix2struct/image_processing_pix2struct_fast.py +15 -12
  1006. transformers/models/pix2struct/modeling_pix2struct.py +52 -58
  1007. transformers/models/pix2struct/processing_pix2struct.py +30 -5
  1008. transformers/models/pixtral/configuration_pixtral.py +14 -11
  1009. transformers/models/pixtral/image_processing_pixtral.py +28 -26
  1010. transformers/models/pixtral/image_processing_pixtral_fast.py +11 -10
  1011. transformers/models/pixtral/modeling_pixtral.py +34 -28
  1012. transformers/models/pixtral/processing_pixtral.py +53 -21
  1013. transformers/models/plbart/configuration_plbart.py +5 -8
  1014. transformers/models/plbart/modeling_plbart.py +106 -119
  1015. transformers/models/plbart/modular_plbart.py +33 -39
  1016. transformers/models/plbart/tokenization_plbart.py +7 -4
  1017. transformers/models/poolformer/configuration_poolformer.py +1 -0
  1018. transformers/models/poolformer/image_processing_poolformer.py +24 -21
  1019. transformers/models/poolformer/image_processing_poolformer_fast.py +15 -13
  1020. transformers/models/poolformer/modeling_poolformer.py +13 -23
  1021. transformers/models/pop2piano/configuration_pop2piano.py +8 -7
  1022. transformers/models/pop2piano/feature_extraction_pop2piano.py +9 -6
  1023. transformers/models/pop2piano/modeling_pop2piano.py +24 -26
  1024. transformers/models/pop2piano/processing_pop2piano.py +33 -25
  1025. transformers/models/pop2piano/tokenization_pop2piano.py +23 -15
  1026. transformers/models/prompt_depth_anything/configuration_prompt_depth_anything.py +3 -3
  1027. transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py +28 -28
  1028. transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything_fast.py +21 -20
  1029. transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py +13 -16
  1030. transformers/models/prompt_depth_anything/modular_prompt_depth_anything.py +13 -16
  1031. transformers/models/prophetnet/configuration_prophetnet.py +38 -37
  1032. transformers/models/prophetnet/modeling_prophetnet.py +131 -114
  1033. transformers/models/prophetnet/tokenization_prophetnet.py +16 -14
  1034. transformers/models/pvt/configuration_pvt.py +1 -0
  1035. transformers/models/pvt/image_processing_pvt.py +27 -24
  1036. transformers/models/pvt/image_processing_pvt_fast.py +2 -1
  1037. transformers/models/pvt/modeling_pvt.py +21 -21
  1038. transformers/models/pvt_v2/configuration_pvt_v2.py +4 -2
  1039. transformers/models/pvt_v2/modeling_pvt_v2.py +25 -28
  1040. transformers/models/qwen2/configuration_qwen2.py +25 -32
  1041. transformers/models/qwen2/modeling_qwen2.py +38 -36
  1042. transformers/models/qwen2/modular_qwen2.py +12 -11
  1043. transformers/models/qwen2/tokenization_qwen2.py +23 -12
  1044. transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py +26 -32
  1045. transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py +277 -340
  1046. transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py +211 -278
  1047. transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py +49 -41
  1048. transformers/models/qwen2_5_vl/configuration_qwen2_5_vl.py +35 -29
  1049. transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py +148 -203
  1050. transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py +118 -93
  1051. transformers/models/qwen2_5_vl/processing_qwen2_5_vl.py +43 -7
  1052. transformers/models/qwen2_audio/configuration_qwen2_audio.py +1 -0
  1053. transformers/models/qwen2_audio/modeling_qwen2_audio.py +40 -40
  1054. transformers/models/qwen2_audio/processing_qwen2_audio.py +42 -13
  1055. transformers/models/qwen2_moe/configuration_qwen2_moe.py +35 -42
  1056. transformers/models/qwen2_moe/modeling_qwen2_moe.py +46 -51
  1057. transformers/models/qwen2_moe/modular_qwen2_moe.py +10 -7
  1058. transformers/models/qwen2_vl/configuration_qwen2_vl.py +34 -29
  1059. transformers/models/qwen2_vl/image_processing_qwen2_vl.py +42 -41
  1060. transformers/models/qwen2_vl/image_processing_qwen2_vl_fast.py +15 -12
  1061. transformers/models/qwen2_vl/modeling_qwen2_vl.py +153 -199
  1062. transformers/models/qwen2_vl/processing_qwen2_vl.py +44 -7
  1063. transformers/models/qwen2_vl/video_processing_qwen2_vl.py +18 -38
  1064. transformers/models/qwen3/configuration_qwen3.py +27 -34
  1065. transformers/models/qwen3/modeling_qwen3.py +39 -36
  1066. transformers/models/qwen3/modular_qwen3.py +6 -4
  1067. transformers/models/qwen3_moe/configuration_qwen3_moe.py +32 -39
  1068. transformers/models/qwen3_moe/modeling_qwen3_moe.py +46 -51
  1069. transformers/models/qwen3_moe/modular_qwen3_moe.py +13 -10
  1070. transformers/models/qwen3_next/configuration_qwen3_next.py +35 -45
  1071. transformers/models/qwen3_next/modeling_qwen3_next.py +51 -47
  1072. transformers/models/qwen3_next/modular_qwen3_next.py +35 -34
  1073. transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py +101 -135
  1074. transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py +252 -355
  1075. transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py +196 -250
  1076. transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py +48 -40
  1077. transformers/models/qwen3_vl/configuration_qwen3_vl.py +29 -27
  1078. transformers/models/qwen3_vl/modeling_qwen3_vl.py +155 -233
  1079. transformers/models/qwen3_vl/modular_qwen3_vl.py +179 -206
  1080. transformers/models/qwen3_vl/processing_qwen3_vl.py +42 -6
  1081. transformers/models/qwen3_vl/video_processing_qwen3_vl.py +12 -10
  1082. transformers/models/qwen3_vl_moe/configuration_qwen3_vl_moe.py +30 -23
  1083. transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py +303 -358
  1084. transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py +124 -87
  1085. transformers/models/rag/configuration_rag.py +15 -6
  1086. transformers/models/rag/modeling_rag.py +130 -127
  1087. transformers/models/rag/retrieval_rag.py +5 -3
  1088. transformers/models/rag/tokenization_rag.py +50 -0
  1089. transformers/models/recurrent_gemma/configuration_recurrent_gemma.py +30 -29
  1090. transformers/models/recurrent_gemma/modeling_recurrent_gemma.py +42 -53
  1091. transformers/models/reformer/configuration_reformer.py +8 -7
  1092. transformers/models/reformer/modeling_reformer.py +69 -80
  1093. transformers/models/reformer/tokenization_reformer.py +31 -11
  1094. transformers/models/regnet/configuration_regnet.py +1 -0
  1095. transformers/models/regnet/modeling_regnet.py +8 -15
  1096. transformers/models/rembert/configuration_rembert.py +2 -8
  1097. transformers/models/rembert/modeling_rembert.py +111 -121
  1098. transformers/models/rembert/tokenization_rembert.py +12 -2
  1099. transformers/models/resnet/configuration_resnet.py +1 -0
  1100. transformers/models/resnet/modeling_resnet.py +13 -27
  1101. transformers/models/roberta/configuration_roberta.py +3 -11
  1102. transformers/models/roberta/modeling_roberta.py +93 -94
  1103. transformers/models/roberta/modular_roberta.py +58 -58
  1104. transformers/models/roberta/tokenization_roberta.py +29 -17
  1105. transformers/models/roberta/tokenization_roberta_old.py +4 -2
  1106. transformers/models/roberta_prelayernorm/configuration_roberta_prelayernorm.py +3 -11
  1107. transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py +93 -94
  1108. transformers/models/roc_bert/configuration_roc_bert.py +2 -8
  1109. transformers/models/roc_bert/modeling_roc_bert.py +121 -122
  1110. transformers/models/roc_bert/tokenization_roc_bert.py +94 -88
  1111. transformers/models/roformer/configuration_roformer.py +3 -13
  1112. transformers/models/roformer/modeling_roformer.py +81 -85
  1113. transformers/models/roformer/tokenization_roformer.py +412 -74
  1114. transformers/models/roformer/tokenization_roformer_fast.py +160 -0
  1115. transformers/models/roformer/tokenization_utils.py +1 -0
  1116. transformers/models/rt_detr/configuration_rt_detr.py +2 -1
  1117. transformers/models/rt_detr/configuration_rt_detr_resnet.py +1 -0
  1118. transformers/models/rt_detr/image_processing_rt_detr.py +55 -54
  1119. transformers/models/rt_detr/image_processing_rt_detr_fast.py +26 -26
  1120. transformers/models/rt_detr/modeling_rt_detr.py +90 -99
  1121. transformers/models/rt_detr/modeling_rt_detr_resnet.py +6 -13
  1122. transformers/models/rt_detr/modular_rt_detr.py +16 -16
  1123. transformers/models/rt_detr_v2/configuration_rt_detr_v2.py +4 -6
  1124. transformers/models/rt_detr_v2/modeling_rt_detr_v2.py +90 -101
  1125. transformers/models/rt_detr_v2/modular_rt_detr_v2.py +12 -19
  1126. transformers/models/rwkv/configuration_rwkv.py +4 -2
  1127. transformers/models/rwkv/modeling_rwkv.py +32 -31
  1128. transformers/models/sam/configuration_sam.py +1 -3
  1129. transformers/models/sam/image_processing_sam.py +60 -59
  1130. transformers/models/sam/image_processing_sam_fast.py +27 -25
  1131. transformers/models/sam/modeling_sam.py +41 -47
  1132. transformers/models/sam/processing_sam.py +27 -39
  1133. transformers/models/sam2/configuration_sam2.py +3 -2
  1134. transformers/models/sam2/image_processing_sam2_fast.py +15 -14
  1135. transformers/models/sam2/modeling_sam2.py +90 -96
  1136. transformers/models/sam2/modular_sam2.py +91 -86
  1137. transformers/models/sam2/processing_sam2.py +47 -31
  1138. transformers/models/sam2_video/configuration_sam2_video.py +1 -0
  1139. transformers/models/sam2_video/modeling_sam2_video.py +144 -151
  1140. transformers/models/sam2_video/modular_sam2_video.py +104 -101
  1141. transformers/models/sam2_video/processing_sam2_video.py +66 -49
  1142. transformers/models/sam2_video/video_processing_sam2_video.py +4 -1
  1143. transformers/models/sam3/configuration_sam3.py +2 -21
  1144. transformers/models/sam3/image_processing_sam3_fast.py +20 -17
  1145. transformers/models/sam3/modeling_sam3.py +170 -184
  1146. transformers/models/sam3/modular_sam3.py +8 -3
  1147. transformers/models/sam3/processing_sam3.py +52 -37
  1148. transformers/models/sam3_tracker/__init__.py +1 -0
  1149. transformers/models/sam3_tracker/configuration_sam3_tracker.py +3 -1
  1150. transformers/models/sam3_tracker/modeling_sam3_tracker.py +77 -82
  1151. transformers/models/sam3_tracker/modular_sam3_tracker.py +3 -8
  1152. transformers/models/sam3_tracker/processing_sam3_tracker.py +48 -31
  1153. transformers/models/sam3_tracker_video/__init__.py +1 -0
  1154. transformers/models/sam3_tracker_video/configuration_sam3_tracker_video.py +1 -25
  1155. transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py +122 -135
  1156. transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py +26 -35
  1157. transformers/models/sam3_tracker_video/processing_sam3_tracker_video.py +66 -50
  1158. transformers/models/sam3_video/configuration_sam3_video.py +1 -14
  1159. transformers/models/sam3_video/modeling_sam3_video.py +34 -33
  1160. transformers/models/sam3_video/processing_sam3_video.py +46 -26
  1161. transformers/models/sam_hq/__init__.py +1 -1
  1162. transformers/models/sam_hq/configuration_sam_hq.py +1 -3
  1163. transformers/models/sam_hq/modeling_sam_hq.py +69 -74
  1164. transformers/models/sam_hq/modular_sam_hq.py +25 -23
  1165. transformers/models/sam_hq/{processing_sam_hq.py → processing_samhq.py} +29 -41
  1166. transformers/models/seamless_m4t/configuration_seamless_m4t.py +10 -8
  1167. transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py +11 -8
  1168. transformers/models/seamless_m4t/modeling_seamless_m4t.py +194 -212
  1169. transformers/models/seamless_m4t/processing_seamless_m4t.py +39 -18
  1170. transformers/models/seamless_m4t/tokenization_seamless_m4t.py +77 -40
  1171. transformers/models/seamless_m4t_v2/configuration_seamless_m4t_v2.py +10 -8
  1172. transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py +196 -204
  1173. transformers/models/seed_oss/configuration_seed_oss.py +32 -28
  1174. transformers/models/seed_oss/modeling_seed_oss.py +35 -33
  1175. transformers/models/seed_oss/modular_seed_oss.py +4 -3
  1176. transformers/models/segformer/configuration_segformer.py +10 -0
  1177. transformers/models/segformer/image_processing_segformer.py +42 -39
  1178. transformers/models/segformer/image_processing_segformer_fast.py +12 -10
  1179. transformers/models/segformer/modeling_segformer.py +31 -34
  1180. transformers/models/segformer/modular_segformer.py +10 -8
  1181. transformers/models/seggpt/configuration_seggpt.py +1 -0
  1182. transformers/models/seggpt/image_processing_seggpt.py +41 -38
  1183. transformers/models/seggpt/modeling_seggpt.py +38 -50
  1184. transformers/models/sew/configuration_sew.py +2 -4
  1185. transformers/models/sew/modeling_sew.py +36 -38
  1186. transformers/models/sew/modular_sew.py +13 -13
  1187. transformers/models/sew_d/configuration_sew_d.py +2 -4
  1188. transformers/models/sew_d/modeling_sew_d.py +30 -31
  1189. transformers/models/shieldgemma2/configuration_shieldgemma2.py +1 -0
  1190. transformers/models/shieldgemma2/modeling_shieldgemma2.py +17 -16
  1191. transformers/models/shieldgemma2/processing_shieldgemma2.py +5 -3
  1192. transformers/models/siglip/configuration_siglip.py +2 -4
  1193. transformers/models/siglip/image_processing_siglip.py +20 -17
  1194. transformers/models/siglip/image_processing_siglip_fast.py +1 -0
  1195. transformers/models/siglip/modeling_siglip.py +75 -84
  1196. transformers/models/siglip/processing_siglip.py +14 -2
  1197. transformers/models/siglip/tokenization_siglip.py +7 -6
  1198. transformers/models/siglip2/configuration_siglip2.py +2 -5
  1199. transformers/models/siglip2/image_processing_siglip2.py +16 -15
  1200. transformers/models/siglip2/image_processing_siglip2_fast.py +7 -6
  1201. transformers/models/siglip2/modeling_siglip2.py +129 -143
  1202. transformers/models/siglip2/modular_siglip2.py +46 -47
  1203. transformers/models/siglip2/processing_siglip2.py +14 -2
  1204. transformers/models/smollm3/configuration_smollm3.py +32 -29
  1205. transformers/models/smollm3/modeling_smollm3.py +39 -36
  1206. transformers/models/smollm3/modular_smollm3.py +35 -33
  1207. transformers/models/smolvlm/configuration_smolvlm.py +4 -2
  1208. transformers/models/smolvlm/image_processing_smolvlm.py +43 -42
  1209. transformers/models/smolvlm/image_processing_smolvlm_fast.py +15 -41
  1210. transformers/models/smolvlm/modeling_smolvlm.py +94 -126
  1211. transformers/models/smolvlm/modular_smolvlm.py +39 -50
  1212. transformers/models/smolvlm/processing_smolvlm.py +83 -15
  1213. transformers/models/smolvlm/video_processing_smolvlm.py +18 -16
  1214. transformers/models/speech_encoder_decoder/configuration_speech_encoder_decoder.py +1 -0
  1215. transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +27 -26
  1216. transformers/models/speech_to_text/configuration_speech_to_text.py +9 -9
  1217. transformers/models/speech_to_text/feature_extraction_speech_to_text.py +13 -10
  1218. transformers/models/speech_to_text/modeling_speech_to_text.py +54 -66
  1219. transformers/models/speech_to_text/processing_speech_to_text.py +30 -4
  1220. transformers/models/speech_to_text/tokenization_speech_to_text.py +6 -5
  1221. transformers/models/speecht5/configuration_speecht5.py +9 -7
  1222. transformers/models/speecht5/feature_extraction_speecht5.py +37 -16
  1223. transformers/models/speecht5/modeling_speecht5.py +175 -213
  1224. transformers/models/speecht5/number_normalizer.py +1 -0
  1225. transformers/models/speecht5/processing_speecht5.py +37 -3
  1226. transformers/models/speecht5/tokenization_speecht5.py +5 -4
  1227. transformers/models/splinter/configuration_splinter.py +7 -6
  1228. transformers/models/splinter/modeling_splinter.py +59 -71
  1229. transformers/models/splinter/tokenization_splinter.py +30 -9
  1230. transformers/models/squeezebert/configuration_squeezebert.py +2 -14
  1231. transformers/models/squeezebert/modeling_squeezebert.py +62 -68
  1232. transformers/models/squeezebert/tokenization_squeezebert.py +1 -0
  1233. transformers/models/stablelm/configuration_stablelm.py +29 -24
  1234. transformers/models/stablelm/modeling_stablelm.py +45 -44
  1235. transformers/models/starcoder2/configuration_starcoder2.py +27 -30
  1236. transformers/models/starcoder2/modeling_starcoder2.py +41 -39
  1237. transformers/models/starcoder2/modular_starcoder2.py +16 -14
  1238. transformers/models/superglue/configuration_superglue.py +3 -7
  1239. transformers/models/superglue/image_processing_superglue.py +15 -15
  1240. transformers/models/superglue/image_processing_superglue_fast.py +10 -9
  1241. transformers/models/superglue/modeling_superglue.py +37 -42
  1242. transformers/models/superpoint/image_processing_superpoint.py +15 -15
  1243. transformers/models/superpoint/image_processing_superpoint_fast.py +11 -8
  1244. transformers/models/superpoint/modeling_superpoint.py +16 -18
  1245. transformers/models/swiftformer/configuration_swiftformer.py +1 -0
  1246. transformers/models/swiftformer/modeling_swiftformer.py +14 -18
  1247. transformers/models/swin/configuration_swin.py +1 -0
  1248. transformers/models/swin/modeling_swin.py +86 -86
  1249. transformers/models/swin2sr/configuration_swin2sr.py +1 -0
  1250. transformers/models/swin2sr/image_processing_swin2sr.py +13 -10
  1251. transformers/models/swin2sr/image_processing_swin2sr_fast.py +8 -4
  1252. transformers/models/swin2sr/modeling_swin2sr.py +63 -81
  1253. transformers/models/swinv2/configuration_swinv2.py +1 -0
  1254. transformers/models/swinv2/modeling_swinv2.py +104 -108
  1255. transformers/models/switch_transformers/configuration_switch_transformers.py +7 -11
  1256. transformers/models/switch_transformers/modeling_switch_transformers.py +44 -37
  1257. transformers/models/switch_transformers/modular_switch_transformers.py +41 -34
  1258. transformers/models/t5/configuration_t5.py +8 -14
  1259. transformers/models/t5/modeling_t5.py +92 -88
  1260. transformers/models/t5/tokenization_t5.py +9 -3
  1261. transformers/models/t5gemma/configuration_t5gemma.py +41 -43
  1262. transformers/models/t5gemma/modeling_t5gemma.py +107 -104
  1263. transformers/models/t5gemma/modular_t5gemma.py +120 -124
  1264. transformers/models/t5gemma2/configuration_t5gemma2.py +120 -80
  1265. transformers/models/t5gemma2/modeling_t5gemma2.py +125 -141
  1266. transformers/models/t5gemma2/modular_t5gemma2.py +104 -393
  1267. transformers/models/table_transformer/configuration_table_transformer.py +2 -1
  1268. transformers/models/table_transformer/modeling_table_transformer.py +49 -51
  1269. transformers/models/tapas/configuration_tapas.py +2 -12
  1270. transformers/models/tapas/modeling_tapas.py +67 -68
  1271. transformers/models/tapas/tokenization_tapas.py +153 -115
  1272. transformers/models/textnet/configuration_textnet.py +1 -0
  1273. transformers/models/textnet/image_processing_textnet.py +25 -22
  1274. transformers/models/textnet/image_processing_textnet_fast.py +10 -8
  1275. transformers/models/textnet/modeling_textnet.py +16 -28
  1276. transformers/models/time_series_transformer/configuration_time_series_transformer.py +8 -5
  1277. transformers/models/time_series_transformer/modeling_time_series_transformer.py +81 -83
  1278. transformers/models/timesfm/configuration_timesfm.py +1 -0
  1279. transformers/models/timesfm/modeling_timesfm.py +22 -33
  1280. transformers/models/timesfm/modular_timesfm.py +21 -32
  1281. transformers/models/timesformer/configuration_timesformer.py +1 -0
  1282. transformers/models/timesformer/modeling_timesformer.py +16 -15
  1283. transformers/models/timm_backbone/configuration_timm_backbone.py +1 -0
  1284. transformers/models/timm_backbone/modeling_timm_backbone.py +15 -17
  1285. transformers/models/timm_wrapper/configuration_timm_wrapper.py +3 -5
  1286. transformers/models/timm_wrapper/image_processing_timm_wrapper.py +5 -4
  1287. transformers/models/timm_wrapper/modeling_timm_wrapper.py +29 -34
  1288. transformers/models/trocr/configuration_trocr.py +8 -11
  1289. transformers/models/trocr/modeling_trocr.py +44 -45
  1290. transformers/models/trocr/processing_trocr.py +25 -5
  1291. transformers/models/tvp/configuration_tvp.py +2 -5
  1292. transformers/models/tvp/image_processing_tvp.py +52 -50
  1293. transformers/models/tvp/image_processing_tvp_fast.py +15 -15
  1294. transformers/models/tvp/modeling_tvp.py +27 -27
  1295. transformers/models/tvp/processing_tvp.py +14 -2
  1296. transformers/models/udop/configuration_udop.py +7 -16
  1297. transformers/models/udop/modeling_udop.py +73 -71
  1298. transformers/models/udop/processing_udop.py +26 -7
  1299. transformers/models/udop/tokenization_udop.py +105 -84
  1300. transformers/models/umt5/configuration_umt5.py +7 -8
  1301. transformers/models/umt5/modeling_umt5.py +90 -94
  1302. transformers/models/unispeech/configuration_unispeech.py +2 -4
  1303. transformers/models/unispeech/modeling_unispeech.py +49 -51
  1304. transformers/models/unispeech/modular_unispeech.py +22 -22
  1305. transformers/models/unispeech_sat/configuration_unispeech_sat.py +2 -4
  1306. transformers/models/unispeech_sat/modeling_unispeech_sat.py +65 -69
  1307. transformers/models/unispeech_sat/modular_unispeech_sat.py +23 -23
  1308. transformers/models/univnet/feature_extraction_univnet.py +14 -14
  1309. transformers/models/univnet/modeling_univnet.py +8 -8
  1310. transformers/models/upernet/configuration_upernet.py +1 -0
  1311. transformers/models/upernet/modeling_upernet.py +13 -11
  1312. transformers/models/vaultgemma/__init__.py +1 -0
  1313. transformers/models/vaultgemma/configuration_vaultgemma.py +33 -29
  1314. transformers/models/vaultgemma/modeling_vaultgemma.py +41 -39
  1315. transformers/models/vaultgemma/modular_vaultgemma.py +31 -29
  1316. transformers/models/video_llama_3/configuration_video_llama_3.py +0 -4
  1317. transformers/models/video_llama_3/image_processing_video_llama_3.py +42 -43
  1318. transformers/models/video_llama_3/image_processing_video_llama_3_fast.py +14 -12
  1319. transformers/models/video_llama_3/modeling_video_llama_3.py +109 -157
  1320. transformers/models/video_llama_3/modular_video_llama_3.py +146 -155
  1321. transformers/models/video_llama_3/processing_video_llama_3.py +39 -5
  1322. transformers/models/video_llama_3/video_processing_video_llama_3.py +23 -42
  1323. transformers/models/video_llava/configuration_video_llava.py +1 -4
  1324. transformers/models/video_llava/image_processing_video_llava.py +38 -35
  1325. transformers/models/video_llava/modeling_video_llava.py +146 -146
  1326. transformers/models/video_llava/processing_video_llava.py +78 -38
  1327. transformers/models/video_llava/video_processing_video_llava.py +1 -0
  1328. transformers/models/videomae/configuration_videomae.py +1 -0
  1329. transformers/models/videomae/image_processing_videomae.py +34 -31
  1330. transformers/models/videomae/modeling_videomae.py +17 -14
  1331. transformers/models/videomae/video_processing_videomae.py +1 -0
  1332. transformers/models/vilt/configuration_vilt.py +4 -6
  1333. transformers/models/vilt/image_processing_vilt.py +30 -29
  1334. transformers/models/vilt/image_processing_vilt_fast.py +16 -15
  1335. transformers/models/vilt/modeling_vilt.py +90 -116
  1336. transformers/models/vilt/processing_vilt.py +14 -2
  1337. transformers/models/vipllava/configuration_vipllava.py +1 -4
  1338. transformers/models/vipllava/modeling_vipllava.py +70 -99
  1339. transformers/models/vipllava/modular_vipllava.py +54 -78
  1340. transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py +1 -0
  1341. transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +27 -28
  1342. transformers/models/vision_text_dual_encoder/configuration_vision_text_dual_encoder.py +1 -0
  1343. transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +41 -46
  1344. transformers/models/vision_text_dual_encoder/processing_vision_text_dual_encoder.py +16 -2
  1345. transformers/models/visual_bert/configuration_visual_bert.py +2 -6
  1346. transformers/models/visual_bert/modeling_visual_bert.py +92 -98
  1347. transformers/models/vit/configuration_vit.py +1 -0
  1348. transformers/models/vit/image_processing_vit.py +22 -19
  1349. transformers/models/vit/image_processing_vit_fast.py +1 -0
  1350. transformers/models/vit/modeling_vit.py +17 -17
  1351. transformers/models/vit_mae/configuration_vit_mae.py +1 -0
  1352. transformers/models/vit_mae/modeling_vit_mae.py +27 -29
  1353. transformers/models/vit_msn/configuration_vit_msn.py +1 -0
  1354. transformers/models/vit_msn/modeling_vit_msn.py +16 -18
  1355. transformers/models/vitdet/configuration_vitdet.py +1 -0
  1356. transformers/models/vitdet/modeling_vitdet.py +14 -14
  1357. transformers/models/vitmatte/configuration_vitmatte.py +5 -2
  1358. transformers/models/vitmatte/image_processing_vitmatte.py +18 -15
  1359. transformers/models/vitmatte/image_processing_vitmatte_fast.py +18 -16
  1360. transformers/models/vitmatte/modeling_vitmatte.py +11 -14
  1361. transformers/models/vitpose/configuration_vitpose.py +7 -4
  1362. transformers/models/vitpose/image_processing_vitpose.py +25 -24
  1363. transformers/models/vitpose/image_processing_vitpose_fast.py +11 -9
  1364. transformers/models/vitpose/modeling_vitpose.py +14 -14
  1365. transformers/models/vitpose_backbone/configuration_vitpose_backbone.py +1 -0
  1366. transformers/models/vitpose_backbone/modeling_vitpose_backbone.py +10 -8
  1367. transformers/models/vits/configuration_vits.py +1 -4
  1368. transformers/models/vits/modeling_vits.py +42 -44
  1369. transformers/models/vits/tokenization_vits.py +4 -3
  1370. transformers/models/vivit/configuration_vivit.py +1 -0
  1371. transformers/models/vivit/image_processing_vivit.py +39 -36
  1372. transformers/models/vivit/modeling_vivit.py +8 -6
  1373. transformers/models/vjepa2/__init__.py +1 -0
  1374. transformers/models/vjepa2/configuration_vjepa2.py +1 -0
  1375. transformers/models/vjepa2/modeling_vjepa2.py +32 -31
  1376. transformers/models/vjepa2/video_processing_vjepa2.py +1 -0
  1377. transformers/models/voxtral/__init__.py +1 -0
  1378. transformers/models/voxtral/configuration_voxtral.py +2 -0
  1379. transformers/models/voxtral/modeling_voxtral.py +47 -40
  1380. transformers/models/voxtral/modular_voxtral.py +40 -37
  1381. transformers/models/voxtral/processing_voxtral.py +48 -25
  1382. transformers/models/wav2vec2/configuration_wav2vec2.py +2 -4
  1383. transformers/models/wav2vec2/feature_extraction_wav2vec2.py +10 -7
  1384. transformers/models/wav2vec2/modeling_wav2vec2.py +121 -73
  1385. transformers/models/wav2vec2/processing_wav2vec2.py +35 -6
  1386. transformers/models/wav2vec2/tokenization_wav2vec2.py +332 -20
  1387. transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py +2 -4
  1388. transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py +62 -70
  1389. transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py +48 -57
  1390. transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py +35 -6
  1391. transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +2 -4
  1392. transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +77 -90
  1393. transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py +30 -37
  1394. transformers/models/wav2vec2_phoneme/tokenization_wav2vec2_phoneme.py +17 -16
  1395. transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py +55 -36
  1396. transformers/models/wavlm/configuration_wavlm.py +2 -4
  1397. transformers/models/wavlm/modeling_wavlm.py +48 -50
  1398. transformers/models/wavlm/modular_wavlm.py +5 -4
  1399. transformers/models/whisper/configuration_whisper.py +5 -6
  1400. transformers/models/whisper/english_normalizer.py +4 -3
  1401. transformers/models/whisper/feature_extraction_whisper.py +24 -9
  1402. transformers/models/whisper/generation_whisper.py +48 -26
  1403. transformers/models/whisper/modeling_whisper.py +73 -79
  1404. transformers/models/whisper/processing_whisper.py +20 -3
  1405. transformers/models/whisper/tokenization_whisper.py +43 -11
  1406. transformers/models/x_clip/configuration_x_clip.py +2 -4
  1407. transformers/models/x_clip/modeling_x_clip.py +93 -96
  1408. transformers/models/x_clip/processing_x_clip.py +14 -2
  1409. transformers/models/xcodec/configuration_xcodec.py +6 -4
  1410. transformers/models/xcodec/modeling_xcodec.py +17 -20
  1411. transformers/models/xglm/configuration_xglm.py +8 -9
  1412. transformers/models/xglm/modeling_xglm.py +55 -60
  1413. transformers/models/xglm/tokenization_xglm.py +11 -3
  1414. transformers/models/xlm/configuration_xlm.py +8 -10
  1415. transformers/models/xlm/modeling_xlm.py +144 -144
  1416. transformers/models/xlm/tokenization_xlm.py +5 -3
  1417. transformers/models/xlm_roberta/configuration_xlm_roberta.py +3 -11
  1418. transformers/models/xlm_roberta/modeling_xlm_roberta.py +194 -195
  1419. transformers/models/xlm_roberta/modular_xlm_roberta.py +53 -50
  1420. transformers/models/xlm_roberta/tokenization_xlm_roberta.py +18 -8
  1421. transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py +2 -10
  1422. transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +93 -94
  1423. transformers/models/xlm_roberta_xl/modular_xlm_roberta_xl.py +70 -67
  1424. transformers/models/xlnet/configuration_xlnet.py +12 -3
  1425. transformers/models/xlnet/modeling_xlnet.py +163 -152
  1426. transformers/models/xlnet/tokenization_xlnet.py +9 -2
  1427. transformers/models/xlstm/configuration_xlstm.py +12 -8
  1428. transformers/models/xlstm/modeling_xlstm.py +65 -62
  1429. transformers/models/xmod/configuration_xmod.py +3 -11
  1430. transformers/models/xmod/modeling_xmod.py +110 -108
  1431. transformers/models/yolos/configuration_yolos.py +1 -0
  1432. transformers/models/yolos/image_processing_yolos.py +62 -60
  1433. transformers/models/yolos/image_processing_yolos_fast.py +45 -42
  1434. transformers/models/yolos/modeling_yolos.py +16 -16
  1435. transformers/models/yolos/modular_yolos.py +19 -17
  1436. transformers/models/yoso/configuration_yoso.py +2 -8
  1437. transformers/models/yoso/modeling_yoso.py +63 -70
  1438. transformers/models/zamba/configuration_zamba.py +8 -5
  1439. transformers/models/zamba/modeling_zamba.py +78 -81
  1440. transformers/models/zamba2/configuration_zamba2.py +50 -44
  1441. transformers/models/zamba2/modeling_zamba2.py +97 -97
  1442. transformers/models/zamba2/modular_zamba2.py +48 -46
  1443. transformers/models/zoedepth/configuration_zoedepth.py +2 -1
  1444. transformers/models/zoedepth/image_processing_zoedepth.py +29 -28
  1445. transformers/models/zoedepth/image_processing_zoedepth_fast.py +24 -21
  1446. transformers/models/zoedepth/modeling_zoedepth.py +18 -26
  1447. transformers/pipelines/__init__.py +114 -57
  1448. transformers/pipelines/any_to_any.py +22 -14
  1449. transformers/pipelines/audio_utils.py +2 -1
  1450. transformers/pipelines/automatic_speech_recognition.py +12 -20
  1451. transformers/pipelines/base.py +27 -15
  1452. transformers/{models/pe_audio/processing_pe_audio.py → pipelines/deprecated/__init__.py} +3 -10
  1453. transformers/pipelines/deprecated/text2text_generation.py +408 -0
  1454. transformers/pipelines/document_question_answering.py +2 -4
  1455. transformers/pipelines/image_text_to_text.py +1 -0
  1456. transformers/pipelines/image_to_text.py +229 -0
  1457. transformers/pipelines/question_answering.py +44 -5
  1458. transformers/pipelines/text_classification.py +14 -1
  1459. transformers/pipelines/text_generation.py +1 -1
  1460. transformers/pipelines/text_to_audio.py +2 -2
  1461. transformers/pipelines/token_classification.py +22 -1
  1462. transformers/pipelines/video_classification.py +9 -1
  1463. transformers/pipelines/zero_shot_audio_classification.py +1 -0
  1464. transformers/pipelines/zero_shot_classification.py +6 -0
  1465. transformers/pipelines/zero_shot_image_classification.py +7 -0
  1466. transformers/processing_utils.py +145 -230
  1467. transformers/quantizers/auto.py +4 -2
  1468. transformers/quantizers/base.py +173 -53
  1469. transformers/quantizers/quantizer_aqlm.py +23 -2
  1470. transformers/quantizers/quantizer_auto_round.py +12 -2
  1471. transformers/quantizers/quantizer_awq.py +89 -20
  1472. transformers/quantizers/quantizer_bitnet.py +14 -4
  1473. transformers/quantizers/quantizer_bnb_4bit.py +155 -18
  1474. transformers/quantizers/quantizer_bnb_8bit.py +110 -24
  1475. transformers/quantizers/quantizer_compressed_tensors.py +9 -2
  1476. transformers/quantizers/quantizer_eetq.py +74 -16
  1477. transformers/quantizers/quantizer_fbgemm_fp8.py +138 -38
  1478. transformers/quantizers/quantizer_finegrained_fp8.py +113 -26
  1479. transformers/quantizers/quantizer_fp_quant.py +82 -52
  1480. transformers/quantizers/quantizer_gptq.py +28 -8
  1481. transformers/quantizers/quantizer_higgs.py +60 -42
  1482. transformers/quantizers/quantizer_hqq.py +153 -144
  1483. transformers/quantizers/quantizer_mxfp4.py +194 -14
  1484. transformers/quantizers/quantizer_quanto.py +79 -35
  1485. transformers/quantizers/quantizer_quark.py +18 -36
  1486. transformers/quantizers/quantizer_spqr.py +12 -4
  1487. transformers/quantizers/quantizer_torchao.py +325 -50
  1488. transformers/quantizers/quantizer_vptq.py +27 -4
  1489. transformers/quantizers/quantizers_utils.py +0 -20
  1490. transformers/safetensors_conversion.py +3 -9
  1491. transformers/testing_utils.py +82 -326
  1492. transformers/tokenization_mistral_common.py +903 -568
  1493. transformers/tokenization_utils_base.py +340 -220
  1494. transformers/tokenization_utils_sentencepiece.py +6 -5
  1495. transformers/tokenization_utils_tokenizers.py +113 -226
  1496. transformers/trainer.py +53 -60
  1497. transformers/trainer_callback.py +0 -8
  1498. transformers/trainer_seq2seq.py +1 -5
  1499. transformers/trainer_utils.py +1 -1
  1500. transformers/training_args.py +41 -77
  1501. transformers/utils/__init__.py +4 -8
  1502. transformers/utils/attention_visualizer.py +5 -5
  1503. transformers/utils/auto_docstring.py +37 -599
  1504. transformers/utils/doc.py +36 -4
  1505. transformers/utils/dummy_pt_objects.py +42 -0
  1506. transformers/utils/generic.py +28 -111
  1507. transformers/utils/hub.py +15 -5
  1508. transformers/utils/import_utils.py +32 -165
  1509. transformers/utils/kernel_config.py +19 -74
  1510. transformers/utils/loading_report.py +15 -25
  1511. transformers/utils/quantization_config.py +241 -72
  1512. transformers/video_processing_utils.py +39 -41
  1513. transformers/video_utils.py +22 -18
  1514. {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/METADATA +236 -284
  1515. transformers-5.0.0rc0.dist-info/RECORD +1987 -0
  1516. {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/WHEEL +1 -1
  1517. transformers/integrations/moe.py +0 -360
  1518. transformers/integrations/quark.py +0 -53
  1519. transformers/loss/loss_lw_detr.py +0 -356
  1520. transformers/models/ernie4_5_vl_moe/__init__.py +0 -31
  1521. transformers/models/ernie4_5_vl_moe/configuration_ernie4_5_vl_moe.py +0 -340
  1522. transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe.py +0 -455
  1523. transformers/models/ernie4_5_vl_moe/image_processing_ernie4_5_vl_moe_fast.py +0 -231
  1524. transformers/models/ernie4_5_vl_moe/modeling_ernie4_5_vl_moe.py +0 -1936
  1525. transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py +0 -1925
  1526. transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py +0 -249
  1527. transformers/models/ernie4_5_vl_moe/video_processing_ernie4_5_vl_moe.py +0 -593
  1528. transformers/models/fast_vlm/__init__.py +0 -27
  1529. transformers/models/fast_vlm/configuration_fast_vlm.py +0 -137
  1530. transformers/models/fast_vlm/modeling_fast_vlm.py +0 -432
  1531. transformers/models/fast_vlm/modular_fast_vlm.py +0 -373
  1532. transformers/models/glm4_moe_lite/__init__.py +0 -28
  1533. transformers/models/glm4_moe_lite/configuration_glm4_moe_lite.py +0 -233
  1534. transformers/models/glm4_moe_lite/modeling_glm4_moe_lite.py +0 -740
  1535. transformers/models/glm4_moe_lite/modular_glm4_moe_lite.py +0 -302
  1536. transformers/models/glm_image/__init__.py +0 -31
  1537. transformers/models/glm_image/configuration_glm_image.py +0 -351
  1538. transformers/models/glm_image/image_processing_glm_image.py +0 -503
  1539. transformers/models/glm_image/image_processing_glm_image_fast.py +0 -294
  1540. transformers/models/glm_image/modeling_glm_image.py +0 -1642
  1541. transformers/models/glm_image/modular_glm_image.py +0 -1531
  1542. transformers/models/glm_image/processing_glm_image.py +0 -217
  1543. transformers/models/glmasr/__init__.py +0 -29
  1544. transformers/models/glmasr/configuration_glmasr.py +0 -196
  1545. transformers/models/glmasr/modeling_glmasr.py +0 -517
  1546. transformers/models/glmasr/modular_glmasr.py +0 -443
  1547. transformers/models/glmasr/processing_glmasr.py +0 -331
  1548. transformers/models/jais2/__init__.py +0 -27
  1549. transformers/models/jais2/configuration_jais2.py +0 -148
  1550. transformers/models/jais2/modeling_jais2.py +0 -484
  1551. transformers/models/jais2/modular_jais2.py +0 -194
  1552. transformers/models/lasr/__init__.py +0 -29
  1553. transformers/models/lasr/configuration_lasr.py +0 -244
  1554. transformers/models/lasr/feature_extraction_lasr.py +0 -275
  1555. transformers/models/lasr/modeling_lasr.py +0 -727
  1556. transformers/models/lasr/modular_lasr.py +0 -574
  1557. transformers/models/lasr/processing_lasr.py +0 -100
  1558. transformers/models/lasr/tokenization_lasr.py +0 -184
  1559. transformers/models/lighton_ocr/__init__.py +0 -28
  1560. transformers/models/lighton_ocr/configuration_lighton_ocr.py +0 -128
  1561. transformers/models/lighton_ocr/modeling_lighton_ocr.py +0 -463
  1562. transformers/models/lighton_ocr/modular_lighton_ocr.py +0 -404
  1563. transformers/models/lighton_ocr/processing_lighton_ocr.py +0 -229
  1564. transformers/models/lw_detr/__init__.py +0 -27
  1565. transformers/models/lw_detr/configuration_lw_detr.py +0 -374
  1566. transformers/models/lw_detr/modeling_lw_detr.py +0 -1702
  1567. transformers/models/lw_detr/modular_lw_detr.py +0 -1615
  1568. transformers/models/minimax_m2/__init__.py +0 -28
  1569. transformers/models/minimax_m2/configuration_minimax_m2.py +0 -188
  1570. transformers/models/minimax_m2/modeling_minimax_m2.py +0 -704
  1571. transformers/models/minimax_m2/modular_minimax_m2.py +0 -346
  1572. transformers/models/paddleocr_vl/__init__.py +0 -31
  1573. transformers/models/paddleocr_vl/configuration_paddleocr_vl.py +0 -335
  1574. transformers/models/paddleocr_vl/image_processing_paddleocr_vl.py +0 -503
  1575. transformers/models/paddleocr_vl/image_processing_paddleocr_vl_fast.py +0 -209
  1576. transformers/models/paddleocr_vl/modeling_paddleocr_vl.py +0 -1683
  1577. transformers/models/paddleocr_vl/modular_paddleocr_vl.py +0 -1380
  1578. transformers/models/paddleocr_vl/processing_paddleocr_vl.py +0 -133
  1579. transformers/models/pe_audio/__init__.py +0 -29
  1580. transformers/models/pe_audio/configuration_pe_audio.py +0 -204
  1581. transformers/models/pe_audio/feature_extraction_pe_audio.py +0 -160
  1582. transformers/models/pe_audio/modeling_pe_audio.py +0 -819
  1583. transformers/models/pe_audio/modular_pe_audio.py +0 -298
  1584. transformers/models/pe_audio_video/__init__.py +0 -28
  1585. transformers/models/pe_audio_video/configuration_pe_audio_video.py +0 -223
  1586. transformers/models/pe_audio_video/modeling_pe_audio_video.py +0 -971
  1587. transformers/models/pe_audio_video/modular_pe_audio_video.py +0 -763
  1588. transformers/models/pe_video/__init__.py +0 -29
  1589. transformers/models/pe_video/configuration_pe_video.py +0 -209
  1590. transformers/models/pe_video/modeling_pe_video.py +0 -647
  1591. transformers/models/pe_video/modular_pe_video.py +0 -231
  1592. transformers/models/pe_video/processing_pe_video.py +0 -10
  1593. transformers/models/pe_video/video_processing_pe_video.py +0 -64
  1594. transformers/models/pixio/__init__.py +0 -29
  1595. transformers/models/pixio/configuration_pixio.py +0 -150
  1596. transformers/models/pixio/modeling_pixio.py +0 -507
  1597. transformers/models/pixio/modular_pixio.py +0 -403
  1598. transformers/models/solar_open/__init__.py +0 -27
  1599. transformers/models/solar_open/configuration_solar_open.py +0 -184
  1600. transformers/models/solar_open/modeling_solar_open.py +0 -642
  1601. transformers/models/solar_open/modular_solar_open.py +0 -224
  1602. transformers/trainer_jit_checkpoint.py +0 -125
  1603. transformers-5.0.0.dist-info/RECORD +0 -2068
  1604. {transformers-5.0.0.dist-info/licenses → transformers-5.0.0rc0.dist-info}/LICENSE +0 -0
  1605. {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/entry_points.txt +0 -0
  1606. {transformers-5.0.0.dist-info → transformers-5.0.0rc0.dist-info}/top_level.txt +0 -0
transformers/models/ernie4_5_vl_moe/modeling_ernie4_5_vl_moe.py
@@ -1,1936 +0,0 @@
- # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
- # This file was automatically generated from src/transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py.
- # Do NOT edit this file manually as any edits will be overwritten by the generation of
- # the file from the modular. If any change should be done, please apply the change to the
- # modular_ernie4_5_vl_moe.py file directly. One of our CI enforces this.
- # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
- # Copyright 2025 Baidu and HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import itertools
- from collections.abc import Callable
- from typing import Any, Optional
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
-
- from ... import initialization as init
- from ...activations import ACT2FN
- from ...cache_utils import Cache, DynamicCache
- from ...generation import GenerationMixin
- from ...integrations import use_experts_implementation, use_kernel_forward_from_hub, use_kernelized_func
- from ...masking_utils import create_causal_mask
- from ...modeling_flash_attention_utils import FlashAttentionKwargs
- from ...modeling_layers import GradientCheckpointingLayer
- from ...modeling_outputs import BaseModelOutputWithPooling, MoeCausalLMOutputWithPast, MoeModelOutputWithPast
- from ...modeling_rope_utils import dynamic_rope_update
- from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
- from ...processing_utils import Unpack
- from ...utils import (
-     TransformersKwargs,
-     auto_docstring,
-     can_return_tuple,
-     is_torchdynamo_compiling,
-     torch_compilable_check,
- )
- from ...utils.generic import OutputRecorder, check_model_inputs, is_flash_attention_requested, maybe_autocast
- from .configuration_ernie4_5_vl_moe import (
-     Ernie4_5_VL_MoeConfig,
-     Ernie4_5_VL_MoeTextConfig,
-     Ernie4_5_VL_MoeVisionConfig,
- )
-
-
56
- class Ernie4_5_VL_MoeTextRotaryEmbedding(nn.Module):
57
- inv_freq: torch.Tensor # fix linting for `register_buffer`
58
-
59
- def __init__(self, config, device=None):
60
- super().__init__()
61
- self.max_seq_len_cached = config.max_position_embeddings
62
- self.original_max_seq_len = config.max_position_embeddings
63
-
64
- self.config = config
65
-
66
- self.rope_type = self.config.rope_parameters["rope_type"]
67
- rope_init_fn: Callable = self.compute_default_rope_parameters
68
- if self.rope_type != "default":
69
- raise ValueError(f"Ernie 4.5 VL requires the `default` rope type, but found {self.rope_type} instead.")
70
- inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
71
-
72
- self.register_buffer("inv_freq", inv_freq, persistent=False)
73
- self.original_inv_freq = inv_freq
74
-
75
- self.mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20])
76
-
77
- @staticmethod
78
- def compute_default_rope_parameters(
79
- config: Ernie4_5_VL_MoeTextConfig | None = None,
80
- device: Optional["torch.device"] = None,
81
- seq_len: int | None = None,
82
- ) -> tuple["torch.Tensor", float]:
83
- """
84
- Computes the inverse frequencies according to the original RoPE implementation
85
- Args:
86
- config ([`~transformers.PreTrainedConfig`]):
87
- The model configuration.
88
- device (`torch.device`):
89
- The device to use for initialization of the inverse frequencies.
90
- seq_len (`int`, *optional*):
91
- The current sequence length. Unused for this type of RoPE.
92
- Returns:
93
- Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
94
- post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
95
- """
96
- base = config.rope_parameters["rope_theta"]
97
- dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
98
-
99
- attention_factor = 1.0 # Unused in this type of RoPE
100
-
101
- # Compute the inverse frequencies
102
- inv_freq = 1.0 / (
103
- base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
104
- )
105
-
106
- # Special to ernie, we prerotate on the hw dim
107
- mrope_section = config.rope_parameters.get("mrope_section", [22, 22, 20])
108
- hw_dim = mrope_section[0] + mrope_section[1]
109
- t_dim = mrope_section[2]
110
-
111
- inv_freq_3d = torch.empty_like(inv_freq)
112
- # (Pre-)Rotate to avoid another rotation during the forward
113
- inv_freq_3d[:hw_dim] = torch.cat([inv_freq[:-t_dim][0::2], inv_freq[:-t_dim][1::2]])
114
- inv_freq_3d[-t_dim:] = inv_freq[-t_dim:]
115
-
116
- return inv_freq_3d, attention_factor
117
-
118
- @torch.no_grad()
119
- @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
120
- def forward(self, x, position_ids):
121
- inv_freq_expanded = (
122
- self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1).to(x.device)
123
- )
124
- position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions)
125
-
126
- device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
127
- with maybe_autocast(device_type=device_type, enabled=False): # Force float32
128
- freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
129
- cos = freqs.cos() * self.attention_scaling
130
- sin = freqs.sin() * self.attention_scaling
131
-
132
- sin = self.recomposition_to_3d(sin)
133
- cos = self.recomposition_to_3d(cos)
134
-
135
- return cos, sin
136
-
137
- def recomposition_to_3d(self, freq):
138
- freq_h, freq_w, freq_t = (m[(i + 1) % 3] for i, m in enumerate(freq.split([*self.mrope_section], dim=-1)))
139
- freq_hw = torch.stack([freq_h, freq_w], dim=-1).flatten(-2)
140
- freq_hwt = torch.cat([freq_hw, freq_t], dim=-1)
141
- return freq_hwt.repeat_interleave(2, dim=-1)
142
-
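As a side note on the pre-rotation above, here is a minimal sketch with toy sizes (head_dim = 12 and mrope_section = [2, 2, 2] instead of the real values) showing how the h/w frequencies get interleaved up front; `recomposition_to_3d` later stacks the per-grid h/w slices back together and repeats every frequency twice so the cos/sin cover the full head dimension.

import torch

# Toy sizes: 6 inverse frequencies, mrope_section = [2, 2, 2] (hypothetical, not the real config)
dim, base = 12, 10000.0
mrope_section = [2, 2, 2]
hw_dim, t_dim = mrope_section[0] + mrope_section[1], mrope_section[2]

inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))  # [f0, f1, f2, f3, f4, f5]

# Pre-rotation: even-indexed h/w frequencies first, then odd-indexed ones; the t part is untouched
inv_freq_3d = torch.empty_like(inv_freq)
inv_freq_3d[:hw_dim] = torch.cat([inv_freq[:-t_dim][0::2], inv_freq[:-t_dim][1::2]])  # [f0, f2, f1, f3]
inv_freq_3d[-t_dim:] = inv_freq[-t_dim:]                                              # [f4, f5]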
143
-
144
- def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
145
- """
146
- This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
147
- num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
148
- """
149
- batch, num_key_value_heads, slen, head_dim = hidden_states.shape
150
- if n_rep == 1:
151
- return hidden_states
152
- hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
153
- return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
154
-
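A quick sanity check of the equivalence claimed in the docstring above, with arbitrary toy shapes:

import torch

kv = torch.randn(2, 4, 7, 16)   # (batch, num_key_value_heads, seq_len, head_dim)
n_rep = 3                       # num_attention_heads // num_key_value_heads

expanded = kv[:, :, None, :, :].expand(2, 4, n_rep, 7, 16).reshape(2, 4 * n_rep, 7, 16)
assert torch.equal(expanded, kv.repeat_interleave(n_rep, dim=1))  # same layout; expand defers the copy to the reshape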
155
-
156
- def eager_attention_forward(
157
- module: nn.Module,
158
- query: torch.Tensor,
159
- key: torch.Tensor,
160
- value: torch.Tensor,
161
- attention_mask: torch.Tensor | None,
162
- scaling: float,
163
- dropout: float = 0.0,
164
- **kwargs: Unpack[TransformersKwargs],
165
- ):
166
- key_states = repeat_kv(key, module.num_key_value_groups)
167
- value_states = repeat_kv(value, module.num_key_value_groups)
168
-
169
- attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
170
- if attention_mask is not None:
171
- causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
172
- attn_weights = attn_weights + causal_mask
173
-
174
- attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
175
- attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
176
- attn_output = torch.matmul(attn_weights, value_states)
177
- attn_output = attn_output.transpose(1, 2).contiguous()
178
-
179
- return attn_output, attn_weights
180
-
181
-
182
- def rotate_half_text(x):
183
- """Rotates half the hidden dims of the input."""
184
- x1 = x[..., 0::2]
185
- x2 = x[..., 1::2]
186
- return torch.stack((-x2, x1), dim=-1).flatten(-2)
187
-
188
-
189
- def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
190
- """Applies Rotary Position Embedding to the query and key tensors.
191
-
192
- Args:
193
- q (`torch.Tensor`): The query tensor.
194
- k (`torch.Tensor`): The key tensor.
195
- cos (`torch.Tensor`): The cosine part of the rotary embedding.
196
- sin (`torch.Tensor`): The sine part of the rotary embedding.
197
- unsqueeze_dim (`int`, *optional*, defaults to 1):
198
- The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
199
- sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
200
- that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
201
- k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
202
- cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
203
- the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
204
- Returns:
205
- `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
206
- """
207
- original_dtype = q.dtype
208
-
209
- cos = cos.unsqueeze(unsqueeze_dim)
210
- sin = sin.unsqueeze(unsqueeze_dim)
211
-
212
- q_embed = (q.float() * cos) + (rotate_half_text(q).float() * sin)
213
- k_embed = (k.float() * cos) + (rotate_half_text(k).float() * sin)
214
-
215
- return q_embed.to(original_dtype), k_embed.to(original_dtype)
216
-
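A shape-only sketch (hypothetical sizes) of the broadcasting described in the `unsqueeze_dim` note above; it mirrors what the function body does with `unsqueeze_dim=1`:

import torch

bs, heads, seq, head_dim = 1, 8, 5, 64
q = torch.randn(bs, heads, seq, head_dim)
cos = torch.randn(bs, seq, head_dim)    # as returned by the rotary embedding

cos_b = cos.unsqueeze(1)                # (bs, 1, seq, head_dim), i.e. unsqueeze_dim=1
q_rot = q * cos_b                       # broadcasts cleanly against (bs, heads, seq, head_dim)
assert q_rot.shape == q.shape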
217
-
218
- @use_kernelized_func(apply_rotary_pos_emb)
219
- class Ernie4_5_VL_MoeTextAttention(nn.Module):
220
- """Multi-headed attention from 'Attention Is All You Need' paper"""
221
-
222
- def __init__(self, config: Ernie4_5_VL_MoeConfig, layer_idx: int):
223
- super().__init__()
224
- self.config = config
225
- self.layer_idx = layer_idx
226
- self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
227
- self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
228
- self.scaling = self.head_dim**-0.5
229
-
230
- self.attention_dropout = 0.0
231
- self.is_causal = True
232
-
233
- self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
234
- self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
235
- self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
236
- self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
237
-
238
- def forward(
239
- self,
240
- hidden_states: torch.Tensor,
241
- position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
242
- attention_mask: torch.Tensor | None = None,
243
- past_key_values: Cache | None = None,
244
- cache_position: torch.LongTensor | None = None,
245
- **kwargs: Unpack[TransformersKwargs],
246
- ) -> tuple[torch.Tensor, torch.Tensor]:
247
- input_shape = hidden_states.shape[:-1]
248
- hidden_shape = (*input_shape, -1, self.head_dim)
249
-
250
- query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
251
- key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
252
- value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
253
-
254
- cos, sin = position_embeddings
255
- query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
256
-
257
- if past_key_values is not None:
258
- # sin and cos are specific to RoPE models; cache_position needed for the static cache
259
- cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
260
- key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
261
-
262
- attention_interface: Callable = eager_attention_forward
263
- if self.config._attn_implementation != "eager":
264
- attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
265
-
266
- attn_output, attn_weights = attention_interface(
267
- self,
268
- query_states,
269
- key_states,
270
- value_states,
271
- attention_mask,
272
- dropout=0.0 if not self.training else self.attention_dropout,
273
- scaling=self.scaling,
274
- **kwargs,
275
- )
276
-
277
- attn_output = attn_output.reshape(*input_shape, -1).contiguous()
278
- attn_output = self.o_proj(attn_output)
279
- return attn_output, attn_weights
280
-
281
-
282
- @use_kernel_forward_from_hub("RMSNorm")
283
- class Ernie4_5_VL_MoeRMSNorm(nn.Module):
284
- def __init__(self, hidden_size, eps=1e-6):
285
- """
286
- Ernie4_5_VL_MoeRMSNorm is equivalent to T5LayerNorm
287
- """
288
- super().__init__()
289
- self.weight = nn.Parameter(torch.ones(hidden_size))
290
- self.variance_epsilon = eps
291
-
292
- def forward(self, hidden_states):
293
- input_dtype = hidden_states.dtype
294
- hidden_states = hidden_states.to(torch.float32)
295
- variance = hidden_states.pow(2).mean(-1, keepdim=True)
296
- hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
297
- return self.weight * hidden_states.to(input_dtype)
298
-
299
- def extra_repr(self):
300
- return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
301
-
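For reference, a tiny standalone sketch of the normalization the forward above computes (toy shapes, default eps), matching the module up to the float32 round trip:

import torch

x = torch.randn(2, 5, 8)                # (batch, seq, hidden)
eps, weight = 1e-6, torch.ones(8)

y = weight * (x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + eps))  # x * rsqrt(mean(x^2) + eps) * weight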
302
-
303
- class Ernie4_5_VL_MoeMLP(nn.Module):
304
- def __init__(self, config, intermediate_size=None):
305
- super().__init__()
306
- self.config = config
307
- self.hidden_size = config.hidden_size
308
- self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
309
-
310
- self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
311
- self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
312
- self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
313
- self.act_fn = ACT2FN[config.hidden_act]
314
-
315
- def forward(self, x):
316
- down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
317
- return down_proj
318
-
319
-
320
- class Ernie4_5_VL_MoeMoeStatics(nn.Module):
321
- """
322
- Stores MoE (Mixture of Experts) statistics
323
- - Bias for the gating
324
- - Additionally, usage per expert in the original codebase
325
- """
326
-
327
- def __init__(self, config):
328
- super().__init__()
329
-
330
- num_experts_groups = 1
331
- num_experts = config.moe_num_experts
332
-
333
- self.e_score_correction_bias = nn.Parameter(
334
- torch.zeros(num_experts_groups, num_experts, dtype=torch.float32),
335
- requires_grad=False,
336
- )
337
-
338
- def forward(self, hidden_states):
339
- # NOTE: This is a workaround to enable TP with a module that only has parameters
340
- #
341
- # Otherwise, it stays as `DTensor` when called in the "super" forward
342
- # 1. All other tensors are local (`torch.Tensor`)
343
- # 2. Isolate does not work on `nn.Module` which only has parameters
344
- return hidden_states + self.e_score_correction_bias.squeeze()
345
-
346
-
347
- class Ernie4_5_VL_MoeMoeTopKRouter(nn.Module):
348
- def __init__(self, config):
349
- super().__init__()
350
- self.weight = nn.Parameter(torch.zeros(config.moe_num_experts, config.hidden_size, dtype=torch.float32))
351
- self.moe_statics = Ernie4_5_VL_MoeMoeStatics(config)
352
- self.top_k = config.moe_k
353
- self.norm_min = config.moe_norm_min
354
-
355
- def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
356
- device_type = (
357
- hidden_states.device.type
358
- if isinstance(hidden_states.device.type, str) and hidden_states.device.type != "mps"
359
- else "cpu"
360
- )
361
-
362
- with maybe_autocast(device_type=device_type, enabled=False): # Force float32
363
- router_logits = F.linear(hidden_states.float(), self.weight.float())
364
- routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
365
- _, selected_experts = torch.topk(self.moe_statics(routing_weights), self.top_k, dim=-1)
366
- routing_weights = torch.gather(routing_weights, dim=-1, index=selected_experts)
367
- routing_weights = routing_weights / torch.clamp(
368
- routing_weights.sum(dim=-1, keepdim=True), min=self.norm_min
369
- )
370
- routing_weights = routing_weights.to(hidden_states.dtype)
371
- return router_logits, selected_experts, routing_weights
372
-
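A worked toy example of the routing above, assuming 4 experts, top-2 selection and a zero correction bias (all numbers are made up; the real clamp uses `config.moe_norm_min`):

import torch
import torch.nn.functional as F

router_logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])            # (num_tokens=1, num_experts=4)
routing_weights = F.softmax(router_logits, dim=1)                 # ~[0.61, 0.14, 0.22, 0.03]
bias = torch.zeros(4)                                             # stand-in for e_score_correction_bias

# top-k is taken on the bias-corrected scores, but the gathered weights are the raw softmax values
_, selected_experts = torch.topk(routing_weights + bias, k=2, dim=-1)     # experts 0 and 2
weights = torch.gather(routing_weights, dim=-1, index=selected_experts)
weights = weights / weights.sum(dim=-1, keepdim=True).clamp(min=1e-12)    # ~[0.73, 0.27]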
373
-
374
- @use_experts_implementation
375
- class Ernie4_5_VL_MoeMoeExperts(nn.Module):
376
- """Collection of expert weights stored as 3D tensors."""
377
-
378
- def __init__(self, config, intermediate_size=None):
379
- super().__init__()
380
- self.num_experts = config.moe_num_experts
381
- self.hidden_dim = config.hidden_size
382
- self.intermediate_dim = config.moe_intermediate_size if intermediate_size is None else intermediate_size
383
- self.gate_up_proj = nn.Parameter(torch.empty(self.num_experts, 2 * self.intermediate_dim, self.hidden_dim))
384
- self.down_proj = nn.Parameter(torch.empty(self.num_experts, self.hidden_dim, self.intermediate_dim))
385
- self.act_fn = ACT2FN[config.hidden_act]
386
-
387
- def forward(
388
- self,
389
- hidden_states: torch.Tensor,
390
- top_k_index: torch.Tensor,
391
- top_k_weights: torch.Tensor,
392
- ) -> torch.Tensor:
393
- final_hidden_states = torch.zeros_like(hidden_states)
394
- with torch.no_grad():
395
- expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts)
396
- expert_mask = expert_mask.permute(2, 1, 0)
397
- expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
398
-
399
- for expert_idx in expert_hit:
400
- expert_idx = expert_idx[0]
401
- if expert_idx == self.num_experts:
402
- continue
403
- top_k_pos, token_idx = torch.where(expert_mask[expert_idx])
404
- current_state = hidden_states[token_idx]
405
- gate, up = nn.functional.linear(current_state, self.gate_up_proj[expert_idx]).chunk(2, dim=-1)
406
- current_hidden_states = self.act_fn(gate) * up
407
- current_hidden_states = nn.functional.linear(current_hidden_states, self.down_proj[expert_idx])
408
- current_hidden_states = current_hidden_states * top_k_weights[token_idx, top_k_pos, None]
409
- final_hidden_states.index_add_(0, token_idx, current_hidden_states.to(final_hidden_states.dtype))
410
-
411
- return final_hidden_states
412
-
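A small sketch of the dispatch logic above, showing how the one-hot expert mask maps tokens to experts (toy routing, 4 experts, top-2):

import torch

num_experts = 4
top_k_index = torch.tensor([[0, 2],    # token 0 -> experts 0 and 2
                            [2, 3]])   # token 1 -> experts 2 and 3

expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=num_experts).permute(2, 1, 0)
# expert_mask[e, k, t] == 1 when token t picked expert e in its k-th routing slot
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero().flatten()   # tensor([0, 2, 3])

for e in expert_hit.tolist():
    top_k_pos, token_idx = torch.where(expert_mask[e])
    print(e, sorted(token_idx.tolist()))   # 0 -> [0], 2 -> [0, 1], 3 -> [1]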
413
-
414
- class Ernie4_5_VL_MoeSparseMoeBlock(nn.Module):
415
- def __init__(self, config, intermediate_size):
416
- super().__init__()
417
- self.hidden_dim = config.hidden_size
418
- self.num_experts = config.moe_num_experts
419
- self.top_k = config.moe_k
420
- self.gate = Ernie4_5_VL_MoeMoeTopKRouter(config)
421
- self.experts = Ernie4_5_VL_MoeMoeExperts(config, intermediate_size)
422
-
423
- def forward(
424
- self,
425
- hidden_states: torch.Tensor,
426
- ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
427
- hidden_states = hidden_states.view(-1, self.hidden_dim)
428
-
429
- router_logits, top_k_index, top_k_weights = self.gate(hidden_states)
430
- final_hidden_states = self.experts(hidden_states, top_k_index, top_k_weights)
431
-
432
- # MoE results are returned in a flattened shape to ease the modality-isolated assignment of results
433
- return final_hidden_states.flatten(), router_logits.flatten()
434
-
435
-
436
- class Ernie4_5_VL_MoeMoeBlock(nn.Module):
437
- """
438
- Similar to `Ernie4_5_Moe` where we have modality isolated experts:
439
- - A set of text experts that are only run on text tokens
440
- - A set of vision experts that are only run on vision (image/video) tokens
441
-
442
- This modality isolation is unique to the Ernie 4.5 VL Moe models.
443
- """
444
-
445
- def __init__(self, config):
446
- super().__init__()
447
- self.num_experts = config.moe_num_experts
448
-
449
- self.text_moe = Ernie4_5_VL_MoeSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[0])
450
- self.vision_moe = Ernie4_5_VL_MoeSparseMoeBlock(config, intermediate_size=config.moe_intermediate_size[1])
451
-
452
- self.shared_experts = None
453
- if config.moe_num_shared_experts > 0:
454
- self.shared_experts = Ernie4_5_VL_MoeMLP(
455
- config, config.moe_intermediate_size[0] * config.moe_num_shared_experts
456
- )
457
-
458
- def forward(
459
- self,
460
- hidden_states: torch.Tensor,
461
- moe_mm_token_type_ids: torch.IntTensor | None = None,
462
- ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
463
- batch_size, sequence_length, hidden_dim = hidden_states.shape
464
-
465
- # (Optional) shared experts
466
- if self.shared_experts is not None:
467
- shared_output = self.shared_experts(hidden_states)
468
-
469
- if moe_mm_token_type_ids is not None and moe_mm_token_type_ids.any():
470
- final_hidden_states = torch.zeros_like(hidden_states)
471
- router_logits = torch.zeros(
472
- size=(batch_size * sequence_length, self.num_experts),
473
- device=final_hidden_states.device,
474
- dtype=torch.float,
475
- )
476
-
477
- # True (1 or 2) == vision, False (0) == text tokens
478
- moe_mm_token_type_ids = moe_mm_token_type_ids.bool()
479
- token_type_ids_router = moe_mm_token_type_ids.reshape(-1)[:, None].expand(-1, self.num_experts)
480
- token_type_ids_states = moe_mm_token_type_ids[..., None].expand(-1, -1, hidden_dim)
481
-
482
- # Run moe on each modality and assign their results to the original token positions
483
- final_hidden_states[~token_type_ids_states], router_logits[~token_type_ids_router] = self.text_moe(
484
- hidden_states[~token_type_ids_states]
485
- )
486
- final_hidden_states[token_type_ids_states], router_logits[token_type_ids_router] = self.vision_moe(
487
- hidden_states[token_type_ids_states]
488
- )
489
- else:
490
- final_hidden_states, router_logits = self.text_moe(hidden_states)
491
- final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
492
- router_logits = router_logits.reshape(-1, self.num_experts)
493
-
494
- # Add (optional) shared experts to the result
495
- if self.shared_experts is not None:
496
- final_hidden_states = final_hidden_states + shared_output
497
-
498
- return final_hidden_states, router_logits
499
-
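A minimal sketch of the modality split above (toy sizes, made-up token types): vision tokens (type 1 or 2) are pulled out with a boolean mask and only those go through the vision experts, while the text experts only see the rest; the real forward then scatters both flattened results back with the same masks.

import torch

hidden_dim = 4
hidden_states = torch.randn(1, 5, hidden_dim)                    # (batch, seq, hidden)
moe_mm_token_type_ids = torch.tensor([[0, 1, 1, 0, 2]]).bool()   # text, image, image, text, video

token_type_ids_states = moe_mm_token_type_ids[..., None].expand(-1, -1, hidden_dim)
text_tokens = hidden_states[~token_type_ids_states].view(-1, hidden_dim)     # shape (2, 4)
vision_tokens = hidden_states[token_type_ids_states].view(-1, hidden_dim)    # shape (3, 4)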
500
-
501
- class Ernie4_5_VL_MoeDecoderLayer(GradientCheckpointingLayer):
502
- def __init__(self, config, layer_idx):
503
- super().__init__()
504
- self.hidden_size = config.hidden_size
505
-
506
- self.self_attn = Ernie4_5_VL_MoeTextAttention(config, layer_idx)
507
-
508
- if config.mlp_layer_types[layer_idx] == "sparse":
509
- self.mlp = Ernie4_5_VL_MoeMoeBlock(config)
510
- else:
511
- self.mlp = Ernie4_5_VL_MoeMLP(config)
512
-
513
- self.input_layernorm = Ernie4_5_VL_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
514
- self.post_attention_layernorm = Ernie4_5_VL_MoeRMSNorm(config.hidden_size, config.rms_norm_eps)
515
-
516
- def forward(
517
- self,
518
- hidden_states: torch.Tensor,
519
- position_embeddings: tuple[torch.Tensor, torch.Tensor],
520
- attention_mask: torch.Tensor | None = None,
521
- position_ids: torch.Tensor | None = None,
522
- moe_mm_token_type_ids: torch.IntTensor | None = None,
523
- past_key_values: Cache | None = None,
524
- cache_position: torch.LongTensor | None = None,
525
- **kwargs: Unpack[FlashAttentionKwargs],
526
- ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor] | None]:
527
- residual = hidden_states
528
-
529
- hidden_states = self.input_layernorm(hidden_states)
530
-
531
- # Self Attention
532
- hidden_states, _ = self.self_attn(
533
- hidden_states=hidden_states,
534
- position_embeddings=position_embeddings,
535
- attention_mask=attention_mask,
536
- position_ids=position_ids,
537
- past_key_values=past_key_values,
538
- cache_position=cache_position,
539
- **kwargs,
540
- )
541
- hidden_states = hidden_states + residual
542
-
543
- # Fully Connected
544
- residual = hidden_states
545
- hidden_states = self.post_attention_layernorm(hidden_states)
546
- if isinstance(self.mlp, Ernie4_5_VL_MoeMoeBlock):
547
- hidden_states, _ = self.mlp(hidden_states, moe_mm_token_type_ids)
548
- else:
549
- hidden_states = self.mlp(hidden_states)
550
- hidden_states = hidden_states + residual
551
-
552
- return hidden_states
553
-
554
-
555
- def rotate_half(x):
556
- """Rotates half the hidden dims of the input."""
557
- x1 = x[..., : x.shape[-1] // 2]
558
- x2 = x[..., x.shape[-1] // 2 :]
559
- return torch.cat((-x2, x1), dim=-1)
560
-
561
-
562
- def apply_rotary_pos_emb_vision(
563
- q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
564
- ) -> tuple[torch.Tensor, torch.Tensor]:
565
- orig_q_dtype = q.dtype
566
- orig_k_dtype = k.dtype
567
- q, k = q.float(), k.float()
568
- cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
569
- q_embed = (q * cos) + (rotate_half(q) * sin)
570
- k_embed = (k * cos) + (rotate_half(k) * sin)
571
- q_embed = q_embed.to(orig_q_dtype)
572
- k_embed = k_embed.to(orig_k_dtype)
573
- return q_embed, k_embed
574
-
575
-
576
- class Ernie4_5_VL_MoeVisionAttention(nn.Module):
577
- def __init__(self, config: Ernie4_5_VL_MoeVisionConfig) -> None:
578
- super().__init__()
579
- self.dim = config.hidden_size
580
- self.num_heads = config.num_heads
581
- self.head_dim = self.dim // self.num_heads
582
- self.num_key_value_groups = 1 # needed for eager attention
583
- self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True)
584
- self.proj = nn.Linear(self.dim, self.dim)
585
- self.scaling = self.head_dim**-0.5
586
- self.config = config
587
- self.attention_dropout = 0.0
588
- self.is_causal = False
589
-
590
- def forward(
591
- self,
592
- hidden_states: torch.Tensor,
593
- cu_seqlens: torch.Tensor,
594
- rotary_pos_emb: torch.Tensor | None = None,
595
- position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
596
- **kwargs,
597
- ) -> torch.Tensor:
598
- seq_length = hidden_states.shape[0]
599
- query_states, key_states, value_states = (
600
- self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
601
- )
602
- cos, sin = position_embeddings
603
- query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
604
-
605
- query_states = query_states.transpose(0, 1).unsqueeze(0)
606
- key_states = key_states.transpose(0, 1).unsqueeze(0)
607
- value_states = value_states.transpose(0, 1).unsqueeze(0)
608
-
609
- attention_interface: Callable = eager_attention_forward
610
- if self.config._attn_implementation != "eager":
611
- attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
612
-
613
- if is_flash_attention_requested(self.config):
614
- # Flash Attention: Use cu_seqlens for variable length attention
615
- max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
616
- attn_output, _ = attention_interface(
617
- self,
618
- query_states,
619
- key_states,
620
- value_states,
621
- attention_mask=None,
622
- scaling=self.scaling,
623
- dropout=0.0 if not self.training else self.attention_dropout,
624
- cu_seq_lens_q=cu_seqlens,
625
- cu_seq_lens_k=cu_seqlens,
626
- max_length_q=max_seqlen,
627
- max_length_k=max_seqlen,
628
- is_causal=False,
629
- **kwargs,
630
- )
631
- else:
632
- # Other implementations: Process each chunk separately
633
- lengths = cu_seqlens[1:] - cu_seqlens[:-1]
634
- splits = [
635
- torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
636
- ]
637
-
638
- attn_outputs = [
639
- attention_interface(
640
- self,
641
- q,
642
- k,
643
- v,
644
- attention_mask=None,
645
- scaling=self.scaling,
646
- dropout=0.0 if not self.training else self.attention_dropout,
647
- is_causal=False,
648
- **kwargs,
649
- )[0]
650
- for q, k, v in zip(*splits)
651
- ]
652
- attn_output = torch.cat(attn_outputs, dim=1)
653
-
654
- attn_output = attn_output.reshape(seq_length, -1).contiguous()
655
- attn_output = self.proj(attn_output)
656
- return attn_output
657
-
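A small sketch of the `cu_seqlens` bookkeeping used by both branches above (toy patch counts): the Flash Attention path consumes the cumulative offsets plus the max length directly, while the fallback path splits q/k/v into per-image chunks derived from the same tensor.

import torch

# Cumulative patch counts with a leading 0: three images with 16, 32 and 12 patches
cu_seqlens = torch.tensor([0, 16, 48, 60], dtype=torch.int32)

lengths = cu_seqlens[1:] - cu_seqlens[:-1]        # tensor([16, 32, 12])
max_seqlen = lengths.max()                        # 32, passed as max_length_q / max_length_k to FA

q = torch.randn(1, 4, 60, 32)                     # (1, num_heads, total_patches, head_dim)
chunks = torch.split(q, lengths.tolist(), dim=2)  # per-image chunks for the non-FA branch
assert [c.shape[2] for c in chunks] == [16, 32, 12]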
658
-
659
- class Ernie4_5_VL_MoeVisionBlock(GradientCheckpointingLayer):
660
- def __init__(self, config) -> None:
661
- super().__init__()
662
-
663
- self.norm1 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps)
664
- self.norm2 = nn.LayerNorm(config.hidden_size, config.rms_norm_eps)
665
- self.attn = Ernie4_5_VL_MoeVisionAttention(config=config)
666
- self.mlp = Ernie4_5VLVisionMLP(
667
- dim=config.hidden_size,
668
- hidden_dim=config.intermediate_size,
669
- hidden_act=config.hidden_act,
670
- )
671
-
672
- def forward(
673
- self,
674
- hidden_states: torch.Tensor,
675
- cu_seqlens: torch.Tensor,
676
- rotary_pos_emb: torch.Tensor | None = None,
677
- position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
678
- **kwargs,
679
- ) -> torch.Tensor:
680
- hidden_states = hidden_states + self.attn(
681
- self.norm1(hidden_states),
682
- cu_seqlens=cu_seqlens,
683
- rotary_pos_emb=rotary_pos_emb,
684
- position_embeddings=position_embeddings,
685
- **kwargs,
686
- )
687
- hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
688
- return hidden_states
689
-
690
-
691
- @auto_docstring
692
- class Ernie4_5_VL_MoePreTrainedModel(PreTrainedModel):
693
- config: Ernie4_5_VL_MoeConfig
694
- base_model_prefix = "model"
695
- input_modalities = ("image", "video", "text")
696
- supports_gradient_checkpointing = True
697
- _no_split_modules = ["Ernie4_5_VL_MoeDecoderLayer", "Ernie4_5_VL_MoeVisionBlock"]
698
- _skip_keys_device_placement = "past_key_values"
699
- _supports_flash_attn = True
700
- _supports_sdpa = True
701
- _can_compile_fullgraph = False
702
- _supports_attention_backend = True
703
-
704
- _can_record_outputs = {
705
- "router_logits": OutputRecorder(Ernie4_5_VL_MoeMoeBlock, index=1),
706
- "hidden_states": Ernie4_5_VL_MoeDecoderLayer,
707
- "attentions": Ernie4_5_VL_MoeTextAttention,
708
- }
709
- _keep_in_fp32_modules_strict = ["gate.weight", "moe_statics"]
710
-
711
- def _init_weights(self, module):
712
- super()._init_weights(module)
713
- if isinstance(module, Ernie4_5_VL_MoeMoeTopKRouter):
714
- init.zeros_(module.moe_statics.e_score_correction_bias)
715
- init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
716
- elif isinstance(module, Ernie4_5_VL_MoeMoeExperts):
717
- init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
718
- init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
719
- elif isinstance(module, Ernie4_5_VL_MoeVisionRotaryEmbedding):
720
- inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
721
- init.copy_(module.inv_freq, inv_freq)
722
-
723
-
724
- @auto_docstring
725
- class Ernie4_5_VL_MoeTextModel(Ernie4_5_VL_MoePreTrainedModel):
726
- config: Ernie4_5_VL_MoeTextConfig
727
-
728
- def __init__(self, config: Ernie4_5_VL_MoeTextConfig):
729
- super().__init__(config)
730
- self.padding_idx = config.pad_token_id
731
- self.vocab_size = config.vocab_size
732
-
733
- self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
734
- self.layers = nn.ModuleList(
735
- [Ernie4_5_VL_MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
736
- )
737
- self.norm = Ernie4_5_VL_MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
738
- self.rotary_emb = Ernie4_5_VL_MoeTextRotaryEmbedding(config=config)
739
- self.gradient_checkpointing = False
740
-
741
- # Initialize weights and apply final processing
742
- self.post_init()
743
-
744
- @check_model_inputs
745
- @auto_docstring
746
- def forward(
747
- self,
748
- input_ids: torch.LongTensor | None = None,
749
- attention_mask: torch.Tensor | None = None,
750
- position_ids: torch.LongTensor | None = None,
751
- moe_mm_token_type_ids: torch.IntTensor | None = None,
752
- past_key_values: Cache | None = None,
753
- inputs_embeds: torch.FloatTensor | None = None,
754
- use_cache: bool | None = None,
755
- cache_position: torch.LongTensor | None = None,
756
- **kwargs: Unpack[FlashAttentionKwargs],
757
- ) -> MoeModelOutputWithPast:
758
- r"""
759
- moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
760
- The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
761
- """
762
- if (input_ids is None) ^ (inputs_embeds is not None):
763
- raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
764
-
765
- if use_cache and past_key_values is None:
766
- past_key_values = DynamicCache(config=self.config)
767
-
768
- if inputs_embeds is None:
769
- inputs_embeds = self.embed_tokens(input_ids)
770
-
771
- if cache_position is None:
772
- past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
773
- cache_position = torch.arange(
774
- past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
775
- )
776
-
777
- # the hard-coded `3` is for temporal, height and width.
778
- if position_ids is None:
779
- position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
780
- elif position_ids.ndim == 2:
781
- position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
782
-
783
- # NOTE: we need to pass text position ids for packing. Ernie 4.5 VL uses 3D positions
784
- # where each dim indicates visual spatial positions for temporal/height/width grids.
785
- # There is only one scenario in which FA2-like packed masking might be activated:
786
- # 1. User specifically passed packed `position_ids` and no attention mask.
787
- # In this case we expect the user to create correct position ids for all 3 grids
788
- # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
789
- if position_ids.ndim == 3 and position_ids.shape[0] == 4:
790
- text_position_ids = position_ids[0]
791
- position_ids = position_ids[1:]
792
- else:
793
- # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
794
- text_position_ids = None
795
-
796
- attention_mask = create_causal_mask(
797
- config=self.config,
798
- input_embeds=inputs_embeds,
799
- attention_mask=attention_mask,
800
- cache_position=cache_position,
801
- past_key_values=past_key_values,
802
- position_ids=text_position_ids,
803
- )
804
-
805
- hidden_states = inputs_embeds
806
-
807
- # create position embeddings to be shared across the decoder layers
808
- position_embeddings = self.rotary_emb(hidden_states, position_ids)
809
-
810
- for decoder_layer in self.layers[: self.config.num_hidden_layers]:
811
- hidden_states = decoder_layer(
812
- hidden_states,
813
- position_embeddings=position_embeddings,
814
- attention_mask=attention_mask,
815
- position_ids=position_ids,
816
- moe_mm_token_type_ids=moe_mm_token_type_ids,
817
- past_key_values=past_key_values,
818
- cache_position=cache_position,
819
- **kwargs,
820
- )
821
-
822
- hidden_states = self.norm(hidden_states)
823
-
824
- return MoeModelOutputWithPast(
825
- last_hidden_state=hidden_states,
826
- past_key_values=past_key_values,
827
- )
828
-
829
-
830
- class Ernie4_5VLVisionMLP(nn.Module):
831
- def __init__(self, dim: int, hidden_dim: int, hidden_act: str) -> None:
832
- super().__init__()
833
- self.fc1 = nn.Linear(dim, hidden_dim)
834
- self.act = ACT2FN[hidden_act]
835
- self.fc2 = nn.Linear(hidden_dim, dim)
836
-
837
- def forward(self, x) -> torch.Tensor:
838
- return self.fc2(self.act(self.fc1(x)))
839
-
840
-
841
- class Ernie4_5_VL_MoePatchEmbed(nn.Module):
842
- def __init__(
843
- self,
844
- patch_size: int = 14,
845
- in_channels: int = 3,
846
- embed_dim: int = 1152,
847
- ) -> None:
848
- super().__init__()
849
- self.patch_size = patch_size
850
- self.in_channels = in_channels
851
- self.embed_dim = embed_dim
852
- self.proj = nn.Linear(in_channels * patch_size * patch_size, embed_dim, bias=False)
853
-
854
- def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
855
- target_dtype = self.proj.weight.dtype
856
- return self.proj(hidden_states.to(target_dtype))
857
-
858
-
859
- class Ernie4_5_VL_MoeVisionRotaryEmbedding(nn.Module):
860
- inv_freq: torch.Tensor # fix linting for `register_buffer`
861
-
862
- def __init__(self, dim: int, theta: float = 10000.0) -> None:
863
- super().__init__()
864
- self.dim = dim
865
- self.theta = theta
866
- inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
867
- self.register_buffer("inv_freq", inv_freq, persistent=False)
868
-
869
- def forward(self, seqlen: int) -> torch.Tensor:
870
- seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
871
- freqs = torch.outer(seq, self.inv_freq)
872
- return freqs
873
-
874
-
875
- @auto_docstring
876
- class Ernie4_5_VL_MoeVisionTransformerPretrainedModel(Ernie4_5_VL_MoePreTrainedModel):
877
- config: Ernie4_5_VL_MoeVisionConfig
878
- input_modalities = ("image", "video")
879
- _no_split_modules = ["Ernie4_5_VL_MoeVisionBlock"]
880
- _input_embed_layer = "patch_embed"
881
- _can_record_outputs = {
882
- "router_logits": OutputRecorder(Ernie4_5_VL_MoeMoeBlock, index=1),
883
- "hidden_states": Ernie4_5_VL_MoeVisionBlock,
884
- "attentions": Ernie4_5_VL_MoeVisionAttention,
885
- }
886
-
887
- def __init__(self, config) -> None:
888
- super().__init__(config)
889
- self.spatial_merge_size = config.spatial_merge_size
890
-
891
- self.patch_embed = Ernie4_5_VL_MoePatchEmbed(
892
- patch_size=config.patch_size,
893
- in_channels=config.in_channels,
894
- embed_dim=config.hidden_size,
895
- )
896
-
897
- head_dim = config.hidden_size // config.num_heads
898
- self.rotary_pos_emb = Ernie4_5_VL_MoeVisionRotaryEmbedding(head_dim // 2)
899
-
900
- self.blocks = nn.ModuleList([Ernie4_5_VL_MoeVisionBlock(config) for _ in range(config.depth)])
901
- self.gradient_checkpointing = False
902
-
903
- self.ln = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps)
904
-
905
- self.post_init()
906
-
907
- def rot_pos_emb(self, grid_thw):
908
- pos_ids = []
909
- for t, h, w in grid_thw:
910
- hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
911
- hpos_ids = hpos_ids.reshape(
912
- h // self.spatial_merge_size,
913
- self.spatial_merge_size,
914
- w // self.spatial_merge_size,
915
- self.spatial_merge_size,
916
- )
917
- hpos_ids = hpos_ids.permute(0, 2, 1, 3)
918
- hpos_ids = hpos_ids.flatten()
919
-
920
- wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
921
- wpos_ids = wpos_ids.reshape(
922
- h // self.spatial_merge_size,
923
- self.spatial_merge_size,
924
- w // self.spatial_merge_size,
925
- self.spatial_merge_size,
926
- )
927
- wpos_ids = wpos_ids.permute(0, 2, 1, 3)
928
- wpos_ids = wpos_ids.flatten()
929
- pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
930
- pos_ids = torch.cat(pos_ids, dim=0)
931
- max_grid_size = grid_thw[:, 1:].max()
932
- rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
933
- rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
934
- return rotary_pos_emb
935
-
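A toy walkthrough of `rot_pos_emb` for a single 4x4 grid with `spatial_merge_size = 2` (arbitrary sizes), showing that the (h, w) index pairs come out ordered merge-window by merge-window:

import torch

h, w, merge = 4, 4, 2

hpos = torch.arange(h).unsqueeze(1).expand(-1, w)   # row index of every patch
hpos = hpos.reshape(h // merge, merge, w // merge, merge).permute(0, 2, 1, 3).flatten()

wpos = torch.arange(w).unsqueeze(0).expand(h, -1)   # column index of every patch
wpos = wpos.reshape(h // merge, merge, w // merge, merge).permute(0, 2, 1, 3).flatten()

# First four pairs are the top-left 2x2 merge window: (0, 0), (0, 1), (1, 0), (1, 1)
print(list(zip(hpos.tolist(), wpos.tolist()))[:4])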
936
- @check_model_inputs
937
- def forward(
938
- self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs: Unpack[TransformersKwargs]
939
- ) -> tuple | BaseModelOutputWithPooling:
940
- r"""
941
- grid_thw (`torch.LongTensor` of shape `(num_images, 3)`):
942
- The temporal, height and width dimensions of feature shape for each image. Each row contains [t, h, w] values.
943
- """
944
- hidden_states = self.patch_embed(hidden_states)
945
- rotary_pos_emb = self.rot_pos_emb(grid_thw)
946
- emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
947
- position_embeddings = (emb.cos(), emb.sin())
948
-
949
- cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
950
- dim=0,
951
- # Select dtype based on the following factors:
952
- # - FA2 requires that cu_seqlens_q must have dtype int32
953
- # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
954
- # See https://github.com/huggingface/transformers/pull/34852 for more information
955
- dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
956
- )
957
- cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
958
-
959
- for block in self.blocks:
960
- hidden_states = block(
961
- hidden_states,
962
- cu_seqlens=cu_seqlens,
963
- position_embeddings=position_embeddings,
964
- **kwargs,
965
- )
966
- hidden_states = self.ln(hidden_states)
967
- return BaseModelOutputWithPooling(last_hidden_state=hidden_states)
968
-
969
-
970
- class Ernie4_5_VL_MoeVisionMLP(nn.Module):
971
- def __init__(self, config, in_dim, out_dim):
972
- super().__init__()
973
-
974
- self.fc1 = nn.Linear(in_dim, out_dim)
975
- self.act_fn = nn.GELU()
976
- self.fc2 = nn.Linear(out_dim, out_dim)
977
- self.ln = nn.LayerNorm(out_dim, eps=config.vision_config.rms_norm_eps)
978
-
979
- def forward(self, hidden_states):
980
- hidden_states = self.fc1(hidden_states)
981
- hidden_states = self.act_fn(hidden_states)
982
- hidden_states = self.fc2(hidden_states)
983
- hidden_states = self.ln(hidden_states)
984
- return hidden_states
985
-
986
-
987
- class Ernie4_5_VL_MoeVariableResolutionResamplerModel(nn.Module):
988
- def __init__(self, config: Ernie4_5_VL_MoeConfig):
989
- super().__init__()
990
- self.config = config
991
-
992
- self.in_dim = config.vision_config.hidden_size
993
- self.out_dim = config.text_config.hidden_size
994
- self.spatial_merge_size = config.vision_config.spatial_merge_size
995
- self.temporal_merge_size = config.vision_config.temporal_merge_size
996
-
997
- # compress 2d conv(picture) to 1d
998
- self.spatial_dim = self.in_dim * self.spatial_merge_size**2
999
- # compress 3d conv(video) to 1d
1000
- self.temporal_dim = self.in_dim * self.spatial_merge_size**2 * self.temporal_merge_size
1001
-
1002
- self.spatial_linear = Ernie4_5_VL_MoeVisionMLP(config, self.spatial_dim, self.spatial_dim)
1003
- self.temporal_linear = Ernie4_5_VL_MoeVisionMLP(config, self.temporal_dim, self.spatial_dim)
1004
-
1005
- self.mlp = nn.Linear(self.spatial_dim, self.out_dim)
1006
- self.after_norm = Ernie4_5_VL_MoeRMSNorm(self.out_dim, config.text_config.rms_norm_eps)
1007
-
1008
- def _temporal_slicing(self, hidden_states, grid_thw):
1009
- """
1010
- Slices along the temporal dimension in even/odd patterns (usually if we have a video input)
1011
- or duplicates along the temporal dimension (usually if we have an image input).
1012
-
1013
- Example:
1014
- Video input with temporal pattern of [1, -1, 2, -2, 3, -3]
1015
- > Even input [1, 2, 3], odd input [-1, -2, -3]
1016
- > Reordered via slices to [1, 2, 3, -1, -2, -3]
1017
- Image input with temporal pattern [1]
1018
- > Duplicate input [1], [1]
1019
- > Reordered to [1, 1]
1020
-
1021
- NOTE: This is hard-coded for `temporal_merge_size == 2` and won't work otherwise.
1022
- """
1023
- # Calculating offsets on spatial dim (based on flattened tensors)
1024
- grid_t, grid_hw = grid_thw[:, 0], grid_thw[:, 1:]
1025
- grid_hw_after_conv = grid_hw.prod(-1) // (self.spatial_merge_size**2)
1026
-
1027
- # Calculating offsets on batch dim (based on flattened tensors)
1028
- tokens_per_img_or_vid = (grid_thw.prod(-1) // (self.spatial_merge_size**2)).flatten()
1029
- batch_offsets = torch.empty(tokens_per_img_or_vid.size(), dtype=tokens_per_img_or_vid.dtype)
1030
- batch_offsets[0] = 0
1031
- batch_offsets[1:] = tokens_per_img_or_vid.cumsum(dim=0)[:-1]
1032
-
1033
- first_slice_offsets = []
1034
- second_slice_offsets = []
1035
- for temporal_size, spatial_size, batch_offset in zip(grid_t, grid_hw_after_conv, batch_offsets):
1036
- # Depending on temporal, we may interleave:
1037
- # - Images have temporal == 1 --> same offsets (duplicate "frame" image)
1038
- # - Videos have temporal > 1 --> different offsets (even, odd)
1039
- first_offset_range = range(0, temporal_size, 2)
1040
- second_offset_range = range(1 if temporal_size > 1 else 0, temporal_size, 2)
1041
-
1042
- for temporal_offset_even, temporal_offset_odd in zip(first_offset_range, second_offset_range):
1043
- first_slice_offsets.append(
1044
- torch.arange(
1045
- batch_offset + (temporal_offset_even) * spatial_size,
1046
- batch_offset + (temporal_offset_even + 1) * spatial_size,
1047
- )
1048
- )
1049
- second_slice_offsets.append(
1050
- torch.arange(
1051
- batch_offset + (temporal_offset_odd) * spatial_size,
1052
- batch_offset + (temporal_offset_odd + 1) * spatial_size,
1053
- )
1054
- )
1055
-
1056
- # Input: [1, -1, 2, -2, 3, -3] or [1]
1057
- # Indices: [0, 2, 4] (even) or [0] (duplicate)
1058
- first_slice_offsets = torch.cat(first_slice_offsets, dim=-1).to(hidden_states.device)
1059
- # Indices: [1, 3, 5] (odd) or [0] (duplicate)
1060
- second_slice_offsets = torch.cat(second_slice_offsets, dim=-1).to(hidden_states.device)
1061
-
1062
- # Output: [1, 2, 3, -1, -2, -3] or [1, 1]
1063
- return torch.concat(
1064
- [
1065
- torch.index_select(hidden_states, dim=0, index=first_slice_offsets),
1066
- torch.index_select(hidden_states, dim=0, index=second_slice_offsets),
1067
- ],
1068
- dim=-1,
1069
- )
1070
-
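A toy sketch of the index bookkeeping above for one video with 4 temporal steps and 6 spatial tokens per step (made-up numbers): even frames land in the first index set, odd frames in the second, and the later `index_select` plus concat on the feature dim pairs frame t with frame t+1, which is why this only works for `temporal_merge_size == 2`.

import torch

grid_t, spatial_size, batch_offset = 4, 6, 0

first_slice, second_slice = [], []
for even, odd in zip(range(0, grid_t, 2), range(1 if grid_t > 1 else 0, grid_t, 2)):
    first_slice.append(torch.arange(batch_offset + even * spatial_size, batch_offset + (even + 1) * spatial_size))
    second_slice.append(torch.arange(batch_offset + odd * spatial_size, batch_offset + (odd + 1) * spatial_size))

first_slice = torch.cat(first_slice)     # tokens of frames 0 and 2: [0..5, 12..17]
second_slice = torch.cat(second_slice)   # tokens of frames 1 and 3: [6..11, 18..23]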
1071
- def forward(self, hidden_states, grid_thw):
1072
- # image spatial
1073
- # reshape imitates convolution via linear projection
1074
- hidden_states = hidden_states.reshape([-1, hidden_states.shape[-1] * (self.spatial_merge_size**2)])
1075
- hidden_states = self.spatial_linear(hidden_states)
1076
-
1077
- # video temporal
1078
- hidden_states = self._temporal_slicing(hidden_states, grid_thw)
1079
- hidden_states = self.temporal_linear(hidden_states)
1080
-
1081
- # final mlp
1082
- hidden_states = self.mlp(hidden_states)
1083
- hidden_states = self.after_norm(hidden_states)
1084
-
1085
- return hidden_states
1086
-
1087
-
1088
- @auto_docstring
1089
- class Ernie4_5_VL_MoeModel(Ernie4_5_VL_MoePreTrainedModel):
1090
- base_model_prefix = "model"
1091
- _checkpoint_conversion_mapping = {"^norm": "language_model.norm"}
1092
- # Reference: fix gemma3 grad acc #37208
1093
- accepts_loss_kwargs = False
1094
- config: Ernie4_5_VL_MoeConfig
1095
- _no_split_modules = ["Ernie4_5_VL_MoeDecoderLayer", "Ernie4_5_VL_MoeVisionBlock"]
1096
-
1097
- def __init__(self, config: Ernie4_5_VL_MoeConfig):
1098
- super().__init__(config)
1099
- self.language_model = Ernie4_5_VL_MoeTextModel._from_config(config.text_config)
1100
- self.rope_deltas = None # cache rope_deltas here
1101
- self.vision_tower = Ernie4_5_VL_MoeVisionTransformerPretrainedModel._from_config(config.vision_config)
1102
- self.resampler_model = Ernie4_5_VL_MoeVariableResolutionResamplerModel(config)
1103
-
1104
- # Initialize weights and apply final processing
1105
- self.post_init()
1106
-
1107
- def get_input_embeddings(self):
1108
- return self.language_model.get_input_embeddings()
1109
-
1110
- def set_input_embeddings(self, value):
1111
- self.language_model.set_input_embeddings(value)
1112
-
1113
- def get_rope_index(
1114
- self,
1115
- input_ids: torch.LongTensor | None = None,
1116
- image_grid_thw: torch.LongTensor | None = None,
1117
- video_grid_thw: torch.LongTensor | None = None,
1118
- attention_mask: torch.Tensor | None = None,
1119
- mm_token_type_ids: torch.IntTensor | None = None,
1120
- ) -> tuple[torch.Tensor, torch.Tensor]:
1121
- """
1122
- Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
1123
-
1124
- Explanation:
1125
- Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
1126
-
1127
- For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
1128
- Examples:
1129
- input_ids: [T T T T T], here T is for text.
1130
- temporal position_ids: [0, 1, 2, 3, 4]
1131
- height position_ids: [0, 1, 2, 3, 4]
1132
- width position_ids: [0, 1, 2, 3, 4]
1133
-
1134
- For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
1135
- and 1D rotary position embedding for text part.
1136
- Examples:
1137
- Temporal (Time): 3 patches, representing different segments of the video in time.
1138
- Height: 2 patches, dividing each frame vertically.
1139
- Width: 2 patches, dividing each frame horizontally.
1140
- We also have some important parameters:
1141
- fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
1142
- tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity.
1143
- temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
1144
- interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will have a difference of 50 in the temporal position IDs.
1145
- input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
1146
- vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
1147
- vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
1148
- vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
1149
- text temporal position_ids: [101, 102, 103, 104, 105]
1150
- text height position_ids: [101, 102, 103, 104, 105]
1151
- text width position_ids: [101, 102, 103, 104, 105]
1152
- Here we calculate the text start position_ids as the max vision position_ids plus 1.
1153
-
1154
- Args:
1155
- input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1156
- Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
1157
- it.
1158
- image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1159
- The temporal, height and width of feature shape of each image in LLM.
1160
- video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1161
- The temporal, height and width of feature shape of each video in LLM.
1162
- attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
1163
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1164
-
1165
- - 1 for tokens that are **not masked**,
1166
- - 0 for tokens that are **masked**.
1167
- mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
1168
- Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
1169
-
1170
- Returns:
1171
- position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
1172
- mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
1173
- """
1174
-
1175
- temporal_merge_size = self.config.vision_config.temporal_merge_size
1176
- spatial_merge_size = self.config.vision_config.spatial_merge_size
1177
-
1178
- mrope_position_deltas = []
1179
- if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
1180
- total_input_ids = input_ids
1181
- if attention_mask is None:
1182
- attention_mask = torch.ones_like(total_input_ids)
1183
- position_ids = torch.ones(
1184
- 3,
1185
- input_ids.shape[0],
1186
- input_ids.shape[1],
1187
- dtype=input_ids.dtype,
1188
- device=input_ids.device,
1189
- )
1190
- image_index, video_index = 0, 0
1191
- attention_mask = attention_mask.to(total_input_ids.device)
1192
- for i, input_ids in enumerate(total_input_ids):
1193
- # If we don't have `mm_token_type_ids`, then we have text tokens only (== 0)
1194
- if mm_token_type_ids is None:
1195
- input_token_type = torch.zeros_like(input_ids)[attention_mask[i] == 1].tolist()
1196
- else:
1197
- input_token_type = mm_token_type_ids[i, attention_mask[i] == 1].tolist()
1198
-
1199
- input_type_group = []
1200
- for key, group in itertools.groupby(enumerate(input_token_type), lambda x: x[1]):
1201
- group = list(group)
1202
- start_index = group[0][0]
1203
- end_index = group[-1][0] + 1
1204
- input_type_group.append((key, start_index, end_index))
1205
-
1206
- llm_pos_ids_list = []
1207
- for modality_type, start_idx, end_idx in input_type_group:
1208
- st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
1209
-
1210
- # text == 0
1211
- if modality_type == 0:
1212
- text_len = end_idx - start_idx
1213
- llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
1214
-
1215
- # image == 1, video == 2
1216
- else:
1217
- grid_thw = image_grid_thw if modality_type == 1 else video_grid_thw
1218
- mm_index = image_index if modality_type == 1 else video_index
1219
- t_merge_size = 1 if modality_type == 1 else temporal_merge_size
1220
-
1221
- t, h, w = (
1222
- grid_thw[mm_index][0],
1223
- grid_thw[mm_index][1],
1224
- grid_thw[mm_index][2],
1225
- )
1226
- llm_grid_t, llm_grid_h, llm_grid_w = (
1227
- t.item() // t_merge_size,
1228
- h.item() // spatial_merge_size,
1229
- w.item() // spatial_merge_size,
1230
- )
1231
-
1232
- for t_idx in range(llm_grid_t):
1233
- t_index = torch.tensor(t_idx).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
1234
- h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(1, -1, llm_grid_w).flatten()
1235
- w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(1, llm_grid_h, -1).flatten()
1236
- llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + st_idx)
1237
-
1238
- if modality_type == 1:
1239
- image_index += 1
1240
- else:
1241
- video_index += 1
1242
-
1243
- llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
1244
- position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device)
1245
- mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i]))
1246
- mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
1247
- return position_ids, mrope_position_deltas
1248
- else:
1249
- if attention_mask is not None:
1250
- position_ids = attention_mask.long().cumsum(-1) - 1
1251
- position_ids.masked_fill_(attention_mask == 0, 1)
1252
- position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
1253
- max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
1254
- mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
1255
- else:
1256
- position_ids = (
1257
- torch.arange(input_ids.shape[1], device=input_ids.device)
1258
- .view(1, 1, -1)
1259
- .expand(3, input_ids.shape[0], -1)
1260
- )
1261
- mrope_position_deltas = torch.zeros(
1262
- [input_ids.shape[0], 1],
1263
- device=input_ids.device,
1264
- dtype=input_ids.dtype,
1265
- )
1266
-
1267
- return position_ids, mrope_position_deltas
1268
-
1269
- @can_return_tuple
1270
- @auto_docstring
1271
- def get_video_features(
1272
- self,
1273
- pixel_values_videos: torch.FloatTensor,
1274
- video_grid_thw: torch.LongTensor | None = None,
1275
- **kwargs: Unpack[TransformersKwargs],
1276
- ) -> tuple | BaseModelOutputWithPooling:
1277
- r"""
1278
- pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1279
- The tensors corresponding to the input videos.
1280
- video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1281
- The temporal, height and width of feature shape of each video in LLM.
1282
- """
1283
- video_outputs = self.vision_tower(pixel_values_videos, video_grid_thw, return_dict=True, **kwargs)
1284
- video_embeds = self.resampler_model(video_outputs.last_hidden_state, video_grid_thw)
1285
- split_sizes = (
1286
- video_grid_thw.prod(-1)
1287
- // self.vision_tower.spatial_merge_size**2
1288
- // self.resampler_model.temporal_merge_size
1289
- ).tolist()
1290
- video_embeds = torch.split(video_embeds, split_sizes)
1291
- video_outputs.pooler_output = video_embeds
1292
- return video_outputs
1293
-
1294
- @can_return_tuple
1295
- @auto_docstring
1296
- def get_image_features(
1297
- self,
1298
- pixel_values: torch.FloatTensor,
1299
- image_grid_thw: torch.LongTensor | None = None,
1300
- **kwargs: Unpack[TransformersKwargs],
1301
- ) -> tuple | BaseModelOutputWithPooling:
1302
- r"""
1303
- pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1304
- The tensors corresponding to the input images.
1305
- image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1306
- The temporal, height and width of feature shape of each image in LLM.
1307
- """
1308
- image_outputs = self.vision_tower(pixel_values, image_grid_thw, return_dict=True, **kwargs)
1309
- image_embeds = self.resampler_model(image_outputs.last_hidden_state, image_grid_thw)
1310
- split_sizes = (image_grid_thw.prod(-1) // self.vision_tower.spatial_merge_size**2).tolist()
1311
- image_embeds = torch.split(image_embeds, split_sizes)
1312
- image_outputs.pooler_output = image_embeds
1313
- return image_outputs
1314
-
1315
- def get_placeholder_mask(
1316
- self,
1317
- input_ids: torch.LongTensor,
1318
- inputs_embeds: torch.FloatTensor,
1319
- image_features: torch.FloatTensor | None = None,
1320
- video_features: torch.FloatTensor | None = None,
1321
- ):
1322
- """
1323
- Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
1324
- equal to the length of multimodal features. If the lengths are different, an error is raised.
1325
- """
1326
- if input_ids is None:
1327
- special_image_mask = inputs_embeds == self.get_input_embeddings()(
1328
- torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
1329
- )
1330
- special_image_mask = special_image_mask.all(-1)
1331
- special_video_mask = inputs_embeds == self.get_input_embeddings()(
1332
- torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
1333
- )
1334
- special_video_mask = special_video_mask.all(-1)
1335
- else:
1336
- special_image_mask = input_ids == self.config.image_token_id
1337
- special_video_mask = input_ids == self.config.video_token_id
1338
-
1339
- n_image_tokens = special_image_mask.sum()
1340
- special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
1341
- if image_features is not None:
1342
- torch_compilable_check(
1343
- inputs_embeds[special_image_mask].numel() == image_features.numel(),
1344
- f"Image features and image tokens do not match, tokens: {n_image_tokens}, features: {image_features.shape[0]}",
1345
- )
1346
-
1347
- n_video_tokens = special_video_mask.sum()
1348
- special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
1349
- if video_features is not None:
1350
- torch_compilable_check(
1351
- inputs_embeds[special_video_mask].numel() == video_features.numel(),
1352
- f"Video features and video tokens do not match, tokens: {n_video_tokens}, features: {video_features.shape[0]}",
1353
- )
1354
- return special_image_mask, special_video_mask
1355
-
1356
-     @auto_docstring
-     @can_return_tuple
-     def forward(
-         self,
-         input_ids: torch.LongTensor = None,
-         attention_mask: torch.Tensor | None = None,
-         position_ids: torch.LongTensor | None = None,
-         mm_token_type_ids: torch.IntTensor | None = None,
-         moe_mm_token_type_ids: torch.IntTensor | None = None,
-         past_key_values: Cache | None = None,
-         inputs_embeds: torch.FloatTensor | None = None,
-         use_cache: bool | None = None,
-         pixel_values: torch.Tensor | None = None,
-         pixel_values_videos: torch.FloatTensor | None = None,
-         image_grid_thw: torch.LongTensor | None = None,
-         video_grid_thw: torch.LongTensor | None = None,
-         rope_deltas: torch.LongTensor | None = None,
-         cache_position: torch.LongTensor | None = None,
-         **kwargs: Unpack[TransformersKwargs],
-     ) -> tuple | MoeModelOutputWithPast:
-         r"""
-         mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
-         moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
-             The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
-         image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
-             The temporal, height and width of feature shape of each image in LLM.
-         video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
-             The temporal, height and width of feature shape of each video in LLM.
-         rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
-             The rope index difference between sequence length and multimodal rope.
-         """
-         if inputs_embeds is None:
-             inputs_embeds = self.get_input_embeddings()(input_ids)
-
-         if pixel_values is not None:
-             image_embeds = self.get_image_features(pixel_values, image_grid_thw, return_dict=True).pooler_output
-             image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
-             image_mask, _ = self.get_placeholder_mask(
-                 input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
-             )
-             inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
-
-         if pixel_values_videos is not None:
-             video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw, return_dict=True).pooler_output
-             video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
-             _, video_mask = self.get_placeholder_mask(
-                 input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
-             )
-             inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
-
-         if position_ids is None:
-             position_ids = self.get_position_ids(
-                 input_ids=input_ids,
-                 attention_mask=attention_mask,
-                 past_key_values=past_key_values,
-                 inputs_embeds=inputs_embeds,
-                 image_grid_thw=image_grid_thw,
-                 video_grid_thw=video_grid_thw,
-                 cache_position=cache_position,
-                 mm_token_type_ids=mm_token_type_ids,
-             )
-
-         outputs = self.language_model(
-             input_ids=None,
-             position_ids=position_ids,
-             moe_mm_token_type_ids=moe_mm_token_type_ids,
-             attention_mask=attention_mask,
-             use_cache=use_cache,
-             past_key_values=past_key_values,
-             inputs_embeds=inputs_embeds,
-             return_dict=True,
-             cache_position=cache_position,
-             **kwargs,
-         )
-
-         return MoeModelOutputWithPast(
-             last_hidden_state=outputs.last_hidden_state,
-             past_key_values=outputs.past_key_values,
-             hidden_states=outputs.hidden_states,
-             attentions=outputs.attentions,
-             router_logits=outputs.router_logits,
-         )
-
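As an aside, a minimal sketch of the placeholder-scatter pattern used in the removed forward pass above: the boolean image-token mask is expanded to the hidden dimension and `masked_scatter` writes the vision embeddings into those slots. The sizes and the placeholder token id below are made up for illustration, not taken from this model.

import torch

# Toy setup: 1 sequence of 6 tokens, hidden size 4, two image placeholder tokens (id 7 is illustrative).
image_token_id = 7
input_ids = torch.tensor([[1, 7, 7, 2, 3, 4]])
inputs_embeds = torch.zeros(1, 6, 4)
image_features = torch.ones(2, 4)  # one vision embedding per placeholder token

# Expand the token-level mask to the hidden dimension, as get_placeholder_mask does
special_image_mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
assert inputs_embeds[special_image_mask].numel() == image_features.numel()

# masked_scatter fills the masked positions with the flattened vision features, in order
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
print(inputs_embeds[0, 1])  # tensor([1., 1., 1., 1.])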
-     # TODO: Should be moved to generation loop instead in the future
-     # Relevant PR(s): https://github.com/huggingface/transformers/pull/42088
-     def get_position_ids(
-         self,
-         input_ids: torch.LongTensor = None,
-         attention_mask: torch.Tensor | None = None,
-         past_key_values: Cache | None = None,
-         inputs_embeds: torch.FloatTensor | None = None,
-         image_grid_thw: torch.LongTensor | None = None,
-         video_grid_thw: torch.LongTensor | None = None,
-         cache_position: torch.LongTensor | None = None,
-         mm_token_type_ids: torch.IntTensor | None = None,
-     ):
-         """
-         Calculates the 3D position ids with a custom caching mechanism:
-         - The first forward pass calculates the initial positions and the respective
-           deltas (offsets) for subsequent positions. See `get_rope_index` for
-           more details.
-         - From the second pass on (generation), the cache position combined with the
-           cached deltas determines the current position.
-
-         NOTE: We assume that the position ids are `None` and recalculate them here in any case.
-         """
-         # Calculate RoPE index once per generation in the pre-fill stage only.
-         # When compiling, we can't check tensor values, so we only check the input length.
-         # It is safe to assume that `length != 1` means we're in pre-fill because compiled
-         # models currently cannot do assisted decoding.
-         prefill_compiled_stage = is_torchdynamo_compiling() and (
-             (input_ids is not None and input_ids.shape[1] != 1)
-             or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
-         )
-         prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
-             (cache_position is not None and cache_position[0] == 0)
-             or (past_key_values is None or past_key_values.get_seq_length() == 0)
-         )
-         if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None:
-             position_ids, rope_deltas = self.get_rope_index(
-                 input_ids,
-                 image_grid_thw,
-                 video_grid_thw,
-                 attention_mask=attention_mask,
-                 mm_token_type_ids=mm_token_type_ids,
-             )
-             self.rope_deltas = rope_deltas
-         # Otherwise, use the previously calculated rope deltas to get the correct position ids
-         else:
-             if input_ids is not None:
-                 batch_size, seq_length, device = input_ids.shape[0], 1, input_ids.device
-             elif inputs_embeds is not None:
-                 batch_size, seq_length, device = inputs_embeds.shape[0], 1, inputs_embeds.device
-             else:
-                 raise ValueError(
-                     "Cannot calculate position ids without any input to the model. "
-                     "Need either `input_ids` or `inputs_embeds`!"
-                 )
-
-             delta = (cache_position[0] + self.rope_deltas).to(device) if cache_position is not None else 0
-             position_ids = torch.arange(seq_length, device=device)
-             position_ids = position_ids.view(1, -1).expand(batch_size, -1)
-             if cache_position is not None:  # otherwise `delta` is an int `0`
-                 delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
-             position_ids = position_ids.add(delta)
-             position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
-
-         return position_ids
-
-
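A small numeric sketch of the delta caching described in the docstring above: prefill stores `rope_deltas` (the difference between the multimodal rope index and the sequence length, per the docstring earlier), and each decoding step derives its position from `cache_position` plus that cached delta. The numbers are illustrative, not derived from a real checkpoint.

import torch

# Suppose prefill assigned a maximum multimodal position of 9 to a 12-token prompt,
# so the cached delta is (9 + 1) - 12 = -2 (illustrative values).
rope_deltas = torch.tensor([[-2]])   # shape (batch_size, 1)
cache_position = torch.tensor([12])  # first decoding step appends token index 12

# Mirror of the `else` branch above for a single new token
delta = cache_position[0] + rope_deltas              # 12 + (-2) = 10
position_ids = torch.arange(1).view(1, -1) + delta   # shape (batch_size, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)  # (3, batch, 1): t / h / w
print(position_ids[:, 0, 0])  # tensor([10, 10, 10])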
- def load_balancing_loss_func(
-     gate_logits: torch.Tensor | tuple[torch.Tensor] | None,
-     num_experts: int | None = None,
-     top_k=2,
-     attention_mask: torch.Tensor | None = None,
- ) -> torch.Tensor | int:
-     r"""
-     Computes the auxiliary load balancing loss as in Switch Transformer - implemented in PyTorch.
-
-     See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
-     function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
-     experts is too unbalanced.
-
-     Args:
-         gate_logits:
-             Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
-             shape [batch_size X sequence_length, num_experts].
-         num_experts:
-             Number of experts
-         top_k:
-             The number of experts to route per-token, can also be interpreted as the `top-k` routing
-             parameter.
-         attention_mask (`torch.Tensor`, *optional*):
-             The attention_mask used in the forward function,
-             shape [batch_size X sequence_length] if not None.
-
-     Returns:
-         The auxiliary loss.
-     """
-     if gate_logits is None or not isinstance(gate_logits, tuple):
-         return 0
-
-     if isinstance(gate_logits, tuple):
-         compute_device = gate_logits[0].device
-         concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
-
-     routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
-
-     _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
-
-     expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
-
-     if attention_mask is None:
-         # Compute the percentage of tokens routed to each expert
-         tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
-
-         # Compute the average probability of routing to these experts
-         router_prob_per_expert = torch.mean(routing_weights, dim=0)
-     else:
-         batch_size, sequence_length = attention_mask.shape
-         num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
-
-         # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
-         expert_attention_mask = (
-             attention_mask[None, :, :, None, None]
-             .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
-             .reshape(-1, top_k, num_experts)
-             .to(compute_device)
-         )
-
-         # Compute the percentage of tokens routed to each expert
-         tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
-             expert_attention_mask, dim=0
-         )
-
-         # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
-         router_per_expert_attention_mask = (
-             attention_mask[None, :, :, None]
-             .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
-             .reshape(-1, num_experts)
-             .to(compute_device)
-         )
-
-         # Compute the average probability of routing to these experts
-         router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
-             router_per_expert_attention_mask, dim=0
-         )
-
-     overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
-     return overall_loss * num_experts
-
-
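As a quick sanity check of the function above, a toy call with two tokens and two experts; the logits are chosen so routing is perfectly balanced, in which case the scaled loss comes out to 1.0. The tensors are made up for illustration.

import torch

# Two tokens, two experts, top_k=1: token 0 prefers expert 0, token 1 prefers expert 1.
gate_logits = (torch.tensor([[2.0, 0.0], [0.0, 2.0]]),)  # tuple with a single "layer"
loss = load_balancing_loss_func(gate_logits, num_experts=2, top_k=1)

# Each expert receives half the tokens and has a mean routing probability of 0.5,
# so sum(tokens_per_expert * router_prob_per_expert) = 0.5 and the scaled loss is 0.5 * 2 = 1.0.
print(round(loss.item(), 4))  # 1.0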
- class Ernie4_5_VL_MoeForConditionalGeneration(Ernie4_5_VL_MoePreTrainedModel, GenerationMixin):
-     _checkpoint_conversion_mapping = {"^model.norm": "model.language_model.norm"}
-     _tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
-     # Reference: fix gemma3 grad acc #37208
-     accepts_loss_kwargs = False
-
-     def __init__(self, config):
-         super().__init__(config)
-         self.model = Ernie4_5_VL_MoeModel(config)
-         self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
-
-         self.router_aux_loss_coef = config.text_config.router_aux_loss_coef
-         self.num_experts = config.text_config.moe_num_experts
-         self.num_experts_per_tok = config.text_config.moe_k
-
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.model.get_input_embeddings()
-
-     def set_input_embeddings(self, value):
-         self.model.set_input_embeddings(value)
-
-     @auto_docstring
-     def get_video_features(
-         self,
-         pixel_values_videos: torch.FloatTensor,
-         video_grid_thw: torch.LongTensor | None = None,
-         **kwargs: Unpack[TransformersKwargs],
-     ) -> tuple | BaseModelOutputWithPooling:
-         r"""
-         pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
-             The tensors corresponding to the input videos.
-         video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
-             The temporal, height and width of feature shape of each video in LLM.
-         """
-         return self.model.get_video_features(
-             pixel_values_videos=pixel_values_videos, video_grid_thw=video_grid_thw, **kwargs
-         )
-
-     @auto_docstring
-     def get_image_features(
-         self,
-         pixel_values: torch.FloatTensor,
-         image_grid_thw: torch.LongTensor | None = None,
-         **kwargs: Unpack[TransformersKwargs],
-     ) -> tuple | BaseModelOutputWithPooling:
-         r"""
-         pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
-             The tensors corresponding to the input images.
-         image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
-             The temporal, height and width of feature shape of each image in LLM.
-         """
-         return self.model.get_image_features(pixel_values=pixel_values, image_grid_thw=image_grid_thw, **kwargs)
-
-     @auto_docstring
-     @can_return_tuple
-     def forward(
-         self,
-         input_ids: torch.LongTensor = None,
-         attention_mask: torch.Tensor | None = None,
-         position_ids: torch.LongTensor | None = None,
-         mm_token_type_ids: torch.IntTensor | None = None,
-         moe_mm_token_type_ids: torch.IntTensor | None = None,
-         past_key_values: Cache | None = None,
-         inputs_embeds: torch.FloatTensor | None = None,
-         labels: torch.LongTensor | None = None,
-         use_cache: bool | None = None,
-         output_router_logits: bool | None = None,
-         pixel_values: torch.Tensor | None = None,
-         pixel_values_videos: torch.FloatTensor | None = None,
-         image_grid_thw: torch.LongTensor | None = None,
-         video_grid_thw: torch.LongTensor | None = None,
-         rope_deltas: torch.LongTensor | None = None,
-         cache_position: torch.LongTensor | None = None,
-         logits_to_keep: int | torch.Tensor = 0,
-         **kwargs: Unpack[TransformersKwargs],
-     ) -> tuple | MoeCausalLMOutputWithPast:
-         r"""
-         mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Token type ids matching each modality to a different value in the input sequence, i.e. text (0), image (1), video (2).
-         moe_mm_token_type_ids (`torch.IntTensor` of shape `(batch_size, sequence_length)`, *optional*):
-             The same as `mm_token_type_ids` while additionally considering start/end image/video tokens as respective vision tokens.
-         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
-             config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
-             (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
-         image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
-             The temporal, height and width of feature shape of each image in LLM.
-         video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
-             The temporal, height and width of feature shape of each video in LLM.
-         rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
-             The rope index difference between sequence length and multimodal rope.
-         """
-         output_router_logits = (
-             output_router_logits if output_router_logits is not None else self.config.text_config.output_router_logits
-         )
-
-         outputs = self.model(
-             input_ids=input_ids,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             mm_token_type_ids=mm_token_type_ids,
-             moe_mm_token_type_ids=moe_mm_token_type_ids,
-             past_key_values=past_key_values,
-             inputs_embeds=inputs_embeds,
-             use_cache=use_cache,
-             output_router_logits=output_router_logits,
-             return_dict=True,
-             pixel_values=pixel_values,
-             pixel_values_videos=pixel_values_videos,
-             image_grid_thw=image_grid_thw,
-             video_grid_thw=video_grid_thw,
-             rope_deltas=rope_deltas,
-             cache_position=cache_position,
-             **kwargs,
-         )
-
-         hidden_states = outputs.last_hidden_state
-         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
-         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
-         logits = self.lm_head(hidden_states[:, slice_indices, :])
-
-         loss = None
-         if labels is not None:
-             loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
-
-         aux_loss = None
-         if output_router_logits:
-             aux_loss = load_balancing_loss_func(
-                 outputs.router_logits,
-                 self.num_experts,
-                 self.num_experts_per_tok,
-                 attention_mask,
-             )
-             if labels is not None:
-                 loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure aux_loss resides on the same device as loss
-
-         return MoeCausalLMOutputWithPast(
-             loss=loss,
-             aux_loss=aux_loss,
-             logits=logits,
-             past_key_values=outputs.past_key_values,
-             hidden_states=outputs.hidden_states,
-             attentions=outputs.attentions,
-             router_logits=outputs.router_logits,
-         )
-
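For context, a hedged usage sketch of the generation entry point above, assuming the usual transformers multimodal chat workflow (`AutoProcessor.apply_chat_template` + `generate`) applies to this model as it does to similar VLMs, and that the class is importable from the top-level package as the `__all__` list below suggests; the checkpoint path and image URL are placeholders, not real ids from this diff.

from transformers import AutoProcessor, Ernie4_5_VL_MoeForConditionalGeneration

model_path = "path/to/ernie-4_5-vl-moe-checkpoint"  # placeholder path
processor = AutoProcessor.from_pretrained(model_path)
model = Ernie4_5_VL_MoeForConditionalGeneration.from_pretrained(model_path)

messages = [{"role": "user", "content": [
    {"type": "image", "url": "https://example.com/cat.png"},
    {"type": "text", "text": "Describe the image."},
]}]
inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
)
output_ids = model.generate(**inputs, max_new_tokens=32)
print(processor.batch_decode(output_ids, skip_special_tokens=True)[0])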
-     def prepare_inputs_for_generation(
-         self,
-         input_ids,
-         inputs_embeds=None,
-         attention_mask=None,
-         cache_position=None,
-         past_key_values=None,
-         image_grid_thw=None,
-         video_grid_thw=None,
-         use_cache=True,
-         is_first_iteration=False,
-         # Intentionally ignore position ids to force custom cache logic
-         position_ids=None,
-         **kwargs,
-     ):
-         model_inputs = super().prepare_inputs_for_generation(
-             input_ids,
-             inputs_embeds=inputs_embeds,
-             attention_mask=attention_mask,
-             cache_position=cache_position,
-             past_key_values=past_key_values,
-             image_grid_thw=image_grid_thw,
-             video_grid_thw=video_grid_thw,
-             use_cache=use_cache,
-             is_first_iteration=is_first_iteration,
-             **kwargs,
-         )
-
-         # Using our own caching with rope delta
-         model_inputs["position_ids"] = self.model.get_position_ids(
-             input_ids=model_inputs.get("input_ids"),
-             attention_mask=model_inputs.get("attention_mask"),
-             past_key_values=model_inputs.get("past_key_values"),
-             inputs_embeds=model_inputs.get("inputs_embeds"),
-             image_grid_thw=model_inputs.get("image_grid_thw"),
-             video_grid_thw=model_inputs.get("video_grid_thw"),
-             cache_position=model_inputs.get("cache_position"),
-             mm_token_type_ids=model_inputs.get("mm_token_type_ids"),
-         )
-
-         if not is_first_iteration and use_cache:
-             model_inputs["pixel_values"] = None
-             model_inputs["pixel_values_videos"] = None
-             model_inputs["mm_token_type_ids"] = None
-             model_inputs["moe_mm_token_type_ids"] = None
-
-         return model_inputs
-
-     def _get_image_nums_and_video_nums(
-         self,
-         input_ids: torch.LongTensor | None,
-         inputs_embeds: torch.Tensor | None = None,
-     ) -> tuple[torch.Tensor, torch.Tensor]:
-         """
-         Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
-         These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.
-
-         Args:
-             input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
-                 Indices of input sequence tokens in the vocabulary.
-
-         Returns:
-             image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
-             video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
-         """
-
-         if inputs_embeds is not None:
-             is_image = (
-                 inputs_embeds
-                 == self.get_input_embeddings()(
-                     torch.tensor(self.config.image_start_token_id, dtype=torch.long, device=inputs_embeds.device)
-                 )
-             )[..., 0]
-             is_video_start = (
-                 inputs_embeds
-                 == self.get_input_embeddings()(
-                     torch.tensor(self.config.video_start_token_id, dtype=torch.long, device=inputs_embeds.device)
-                 )
-             )[..., 0]
-             is_video_end = (
-                 inputs_embeds
-                 == self.get_input_embeddings()(
-                     torch.tensor(self.config.video_end_token_id, dtype=torch.long, device=inputs_embeds.device)
-                 )
-             )[..., 0]
-         else:
-             is_image = input_ids == self.config.image_start_token_id
-             is_video_start = input_ids == self.config.video_start_token_id
-             is_video_end = input_ids == self.config.video_end_token_id
-
-         # Cumulative sum to track if we're inside a video span
-         # We'll assume well-formed video tags (i.e. matching starts and ends)
-         video_level = torch.cumsum(is_video_start.int() - is_video_end.int(), dim=1)
-         inside_video = video_level > 0  # shape (batch_size, seq_length)
-
-         # Mask out image tokens that are inside video spans
-         standalone_images = is_image & (~inside_video)
-
-         # Count per batch
-         image_counts = standalone_images.sum(dim=1)
-         video_counts = is_video_start.sum(dim=1)
-
-         return image_counts, video_counts
-
-     def _expand_inputs_for_generation(
-         self,
-         expand_size: int = 1,
-         is_encoder_decoder: bool = False,
-         input_ids: torch.LongTensor | None = None,
-         **model_kwargs,
-     ) -> tuple[torch.LongTensor, dict[str, Any]]:
-         # Overwritten -- Support for expanding tensors without a batch size dimension
-         # e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_ts
-         # pixel_values.shape[0] is sum(seqlen_images for samples)
-         # image_grid_thw.shape[0] is sum(num_images for samples)
-
-         if expand_size == 1:
-             return input_ids, model_kwargs
-
-         visual_keys = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw", "second_per_grid_ts"]
-
-         def _expand_dict_for_generation_visual(dict_to_expand):
-             image_grid_thw = model_kwargs.get("image_grid_thw", None)
-             video_grid_thw = model_kwargs.get("video_grid_thw", None)
-             image_nums, video_nums = self._get_image_nums_and_video_nums(
-                 input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None)
-             )
-
-             def _repeat_interleave_samples(x, lengths, repeat_times):
-                 samples = torch.split(x, lengths)
-                 repeat_args = [repeat_times] + [1] * (x.dim() - 1)
-                 result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0)
-                 return result
-
-             for key in dict_to_expand:
-                 if key == "pixel_values":
-                     # split images into samples
-                     samples = torch.split(image_grid_thw, list(image_nums))
-                     # compute the sequence length of images for each sample
-                     lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
-                     dict_to_expand[key] = _repeat_interleave_samples(
-                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
-                     )
-                 elif key == "image_grid_thw":
-                     # get the num of images for each sample
-                     lengths = list(image_nums)
-                     dict_to_expand[key] = _repeat_interleave_samples(
-                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
-                     )
-                 elif key == "pixel_values_videos":
-                     samples = torch.split(video_grid_thw, list(video_nums))
-                     lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
-                     dict_to_expand[key] = _repeat_interleave_samples(
-                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
-                     )
-                 elif key == "video_grid_thw":
-                     lengths = list(video_nums)
-                     dict_to_expand[key] = _repeat_interleave_samples(
-                         dict_to_expand[key], lengths=lengths, repeat_times=expand_size
-                     )
-                 elif key == "second_per_grid_ts":
-                     dict_to_expand[key] = _repeat_interleave_samples(
-                         dict_to_expand[key], lengths=list(video_nums), repeat_times=expand_size
-                     )
-             return dict_to_expand
-
-         def _expand_dict_for_generation(dict_to_expand):
-             for key in dict_to_expand:
-                 if (
-                     key != "cache_position"
-                     and dict_to_expand[key] is not None
-                     and isinstance(dict_to_expand[key], torch.Tensor)
-                     and key not in visual_keys
-                 ):
-                     dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
-             return dict_to_expand
-
-         model_kwargs = _expand_dict_for_generation_visual(model_kwargs)
-
-         if input_ids is not None:
-             input_ids = input_ids.repeat_interleave(expand_size, dim=0)
-
-         model_kwargs = _expand_dict_for_generation(model_kwargs)
-
-         if is_encoder_decoder:
-             if model_kwargs.get("encoder_outputs") is None:
-                 raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
-             model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
-
-         return input_ids, model_kwargs
-
-
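A small sketch of the per-sample expansion performed above: flattened visual tensors carry no batch dimension, so they are split by per-sample counts and each chunk is repeated `expand_size` times, mirroring `_repeat_interleave_samples`. The grid values and counts are illustrative.

import torch

# Two samples: sample 0 contributed 1 image, sample 1 contributed 2 (rows are illustrative t/h/w grids).
image_grid_thw = torch.tensor([[1, 2, 2], [1, 2, 2], [1, 4, 2]])
image_nums = [1, 2]   # images per sample
expand_size = 2       # e.g. num_return_sequences=2

samples = torch.split(image_grid_thw, image_nums)                         # (1 row), (2 rows)
expanded = torch.cat([s.repeat(expand_size, 1) for s in samples], dim=0)
print(expanded.shape)  # torch.Size([6, 3]) -- each sample's rows duplicated back to back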
- __all__ = [
-     "Ernie4_5_VL_MoePreTrainedModel",
-     "Ernie4_5_VL_MoeForConditionalGeneration",
-     "Ernie4_5_VL_MoeModel",
-     "Ernie4_5_VL_MoeTextModel",
-     "Ernie4_5_VL_MoeVisionTransformerPretrainedModel",
-     "Ernie4_5_VL_MoeVariableResolutionResamplerModel",
- ]